content
stringlengths 7
2.61M
|
---|
<gh_stars>0
#ifndef DT_H_
#define DT_H_
/* One day / one (non-leap) year expressed in minutes */
#define DAY_MINUTES (24UL * 60UL)
#define YEAR_MINUTES (unsigned long)(365UL * DAY_MINUTES)
#ifdef WIN32
/* Host-side (Win32 emulation) stand-ins for the target RTOS error codes */
#define TERR_NO_ERR 0
#define TERR_TIMEOUT -110
#define TN_INTSAVE_DATA
#define TOUT_110_MSEC 110 /* NOTE(review): redefined as 28U near the end of this header - on WIN32 that is a conflicting redefinition; verify which value is intended */
#endif
#define ADD_NEW_TIMER (-1) /* NOTE(review): duplicates ADD_TIMER below - both expand to (-1) */
#define ERR_BYTE 0xFFU /* "invalid/unset" marker for a byte value */
#define ERR_USHORT 0xFFFFU /* "invalid/unset" marker for an unsigned short value */
/**
 * Unpacked date/time representation (one field per calendar component).
 * Field layout must not change: RTCDATA and the conversion helpers below
 * (conv_tm_to_min / conv_min_to_tm / dt_to_pk / pk_to_dt) depend on it.
 */
typedef struct _LTT_DT
{
unsigned short tm_sec; //!< Reserved here
unsigned short tm_min; //!< 0..59
unsigned short tm_hour; //!< 0..23
unsigned short tm_mday; //!< 1..31
unsigned short tm_mon; //!< 1..12
unsigned short tm_year; //!< years since BASE_YEAR: 0-2000, 1-2001 etc.
unsigned short tm_wday; //!< 0-Sunday,6-Saturday - not used here
}LTT_DT;
/**
 * Packed date/time: all fields fit a single 32-bit word
 * (6+5+5+3+4+7+2 = 32 bits). Seconds are not stored - minute resolution.
 */
typedef struct _LTT_DT_PK //!< size 32 bits
{
unsigned long min: 6; //!< 0..59
unsigned long hour: 5; //!< 0..23
unsigned long day_m: 5; //!< 1..31
unsigned long day_w: 3; //!< 0-Sun 6 Sat
unsigned long month: 4; //!< 1..12
unsigned long year: 7; //!< 0-2000 1-2001 ... 100 - 2100
unsigned long aux: 2; //!< 0 (padding/reserved)
}LTT_DT_PK;
/**
 *
 * This is a global RTC data structure(single instance for the project)
 */
typedef struct _RTCDATA
{
LTT_DT rtc_val[2]; //!< Two instances for the Ping-Pong operating
LTT_DT * valid_dt; //!< Pointer to valid data/time info structure (points into rtc_val)
LTT_DT * op_dt; //!< Pointer to data/time info structure to fill (the other rtc_val slot)
int enable_ltt_check; //!< For lt timers recreation
LTT_DT dt_prev; //!< For lt timers recreation
}RTCDATA;
/**
 * Long Term Timer operating mode flags (stored in LTTMRENTRY.flags)
 */
#define LTT_REGULARE_MODE 16U //!< long term timer operating mode (uses LTTMRENTRY.timeout)
#define LTT_WEEK_MODE 32U //!< long term timer operating mode
#define LTT_RELATIVE_MODE 64U //!< long term timer operating mode
#define LTT_ONESHOT 128U //!< long term timer operating mode
/* NOTE(review): 256U is unused between LTT_ONESHOT and LTTL_EXEC_NOW - presumably reserved; confirm */
#define LTTL_EXEC_NOW 512U //!< long term timer flag - exec mode flag (internal)
#define LTTL_IS_FREE 1024U //!< long term timer flag - slot is free/occupied (internal)
// * Config
#define BASE_YEAR 2000 /**< Minimal(base) year for RTC operations */
#define MAX_YEAR 2100 /**< Maximal year for RTC operations */
//------------------------------------------
/**< Mode to distinguish between create or re-create timer options */
#define ADD_TIMER (-1) /* NOTE(review): same value as ADD_NEW_TIMER above - likely one of them is redundant */
/**
 *
 * The prototype for the lt timer callback function.
 * Invoked when a long-term timer expires; 'data' is the user pointer
 * registered via ltt_register_timer().
 */
typedef int (* timer_lt_handler_func) (int timer_id, LTT_DT_PK lt_timestamp, void * data);
/**
 *
 * This is an entry in the lt timers table (one slot per registered timer).
 */
typedef struct _LTTMRENTRY //-- 24 bytes
{
LTT_DT_PK base_dt; //!< the base timer date/time
LTT_DT_PK exec_dt; //!< the timer current(next) expiring data/time
unsigned long timer_id; //!< timer ID
unsigned long timeout; //!< timer timeout(for LTT_REGULARE_MODE only)
unsigned int flags; //!< for internal operating (LTT_*/LTTL_* bits above)
timer_lt_handler_func callback_func; //!< timer callback function - may be NULL
void * callback_func_data; //!< timer callback function data - may be NULL
#ifdef WIN32
int start_tick; //!< emulation only - tick count at timer start
#endif
}LTTMRENTRY;
//----------------------------------------------------------
#ifdef WIN32
/* Win32 emulation of the RTOS semaphore API (HANDLE comes from windows.h,
   which this header does not include itself - TODO confirm include order) */
typedef struct _TN_SEM
{
HANDLE hSem;
}TN_SEM;
int tn_sem_create(TN_SEM * sem, int init_count);
int tn_sem_acquire(TN_SEM * sem, long timeout);
int tn_sem_signal(TN_SEM * sem);
#endif
/* Calendar helpers. NOTE(review): BOOL is not defined in this header -
   presumably supplied by a platform header; verify. */
BOOL is_leap_year(int year);
unsigned long conv_tm_to_min(LTT_DT * tm); /* date/time -> total minutes (epoch presumably BASE_YEAR - confirm) */
int conv_min_to_tm(unsigned long min,
LTT_DT * tm); // [OUT] inverse of conv_tm_to_min
int get_max_day(int month, int year); /* days in 'month' of 'year' (leap-year aware) */
/* Emulation / alarm plumbing */
int exec_emu_alarm_callback(int idx);
int emu_run_rtc(int * res);
int ltt_alarm_proc(void * par);
#if 0 /* disabled: test-support structure, kept for reference */
typedef struct _TESTARRITEM
{
BOOL alarm_event;
unsigned long timeout;
LTT_DT_PK base_dt; //!< the base timer date/time
LTT_DT_PK exec_dt; //!< the timer current(next) expiring data/time
struct tm curr_dt;
int ticks;
}TESTARRITEM;
#endif
/**
 * Long-term-timer statistics counters (diagnostics, see dump_stat()).
 */
typedef struct _LTTSTAT
{
int registered_cnt; //!< successful timer registrations
int fail_registered_cnt; //!< failed registrations
int recreate_cnt; //!< timers re-created (see ltt_timers_recreate)
int unregistered_cnt; //!< timers removed
int not_free_cnt; //!< occupied slots (see calc_not_free_timer)
int exp_cnt; //!< expirations observed
int exp_ok_cnt; //!< expirations handled OK
int exp_fail_cnt; //!< expirations that failed
int time_change_cnt; //!< wall-clock changes detected
}LTTSTAT;
/* Timer-table access / diagnostics */
LTTMRENTRY * ltt_timers_get_arr(void);
void check_and_dump_expiring_result(int idx);
/* Conversions between the unpacked and packed date/time forms */
int dt_to_pk(LTT_DT * src,
LTT_DT_PK * dst);
int pk_to_dt(LTT_DT_PK * src,
LTT_DT * dst);
/* Long-term timer lifecycle */
int ltt_timers_init(void);
int ltt_timers_recreate(void);
int ltt_unregister_timer(long timer_id);
/* RTC operations; BCD variants presumably talk to the RTC chip - confirm */
int rtc_init(RTCDATA * prtc, LTT_DT * ref_dt);
int rtc_set_clock(LTT_DT * dt);
int do_rtc_set_clock(LTT_DT * dt);
int rtc_get_clock(LTT_DT * dt);
int rtc_bin_to_bcd(LTT_DT * dt);
int rtc_dt_pk_to_bcd(LTT_DT * dst,
LTT_DT_PK * src);
int rtc_bcd_to_bin(LTT_DT * dt);
/* Registers a timer; returns the timer id (negative on failure - confirm) */
long ltt_register_timer(int mode,
LTT_DT * ltdt, // reserved, may be NULL
unsigned long timeout,
void * data,
timer_lt_handler_func timer_callback);
int rtc_get_clock_bcd_async(void);
/* Emulation / diagnostics helpers */
void change_emu_time(int min_to_add);
void dump_stat(void);
void calc_not_free_timer(void);
/* Timeout values in system ticks (256 ticks = 1 s, per TOUT_1_SEC). */
/* Guarded: the WIN32 emulation branch at the top of this header already
 * defines TOUT_110_MSEC as 110 (milliseconds); redefining it here with a
 * different value is a macro-redefinition constraint violation (C11 6.10.3). */
#ifndef TOUT_110_MSEC
#define TOUT_110_MSEC 28U /* ~110 ms in ticks */
#endif
#define TOUT_200_MSEC 51U /* ~200 ms in ticks */
#define TOUT_1_SEC 256UL
#define TOUT_5_SEC (5UL * 256UL)
#define TOUT_10_SEC (10UL * 256UL)
#define TOUT_20_SEC (20UL * 256UL)
#endif
|
<gh_stars>1-10
#include "utilities.h"

#include <cctype>
#include <string>
/**
 * Case-insensitive comparison of two characters.
 *
 * The characters are converted through unsigned char before being passed
 * to std::tolower: calling tolower with a plain (possibly negative) char
 * is undefined behaviour for values outside the unsigned-char range,
 * e.g. Latin-1 bytes on platforms where char is signed.
 */
bool Utilities::equalsIgnoreCase(const char & a, const char & b)
{
    return std::tolower(static_cast<unsigned char>(a)) ==
           std::tolower(static_cast<unsigned char>(b));
}
/**
 * Case-insensitive comparison of two strings.
 *
 * Strings of different lengths can never match; otherwise each character
 * pair is compared through the single-character overload.
 */
bool Utilities::equalsIgnoreCase(const std::string & a, const std::string & b)
{
    const std::string::size_type len = a.size();
    if (len != b.size())
    {
        return false;
    }
    for (std::string::size_type pos = 0; pos != len; ++pos)
    {
        if (!equalsIgnoreCase(a[pos], b[pos]))
        {
            return false;
        }
    }
    return true;
}
|
TRENTON -- A day after Atlantic City's mayor said he will soon shut down city hall and not pay employees amid an ever-growing financial crisis, Gov. Chris Christie repeated Tuesday that he will not provide a loan or send rescue aid without a state takeover of the city.
"The mayor can decide to do one of two things: Either cooperate ... or the inevitable will occur," Christie told reporters at a news conference in Long Branch. "And the inevitable is that they will face bankruptcy."
"And if they do," the governor added, "then the bankruptcy court will control their fate, not the state of New Jersey. If that is what they prefer, it is their choice."
The comments are the latest in a weeks-long standoff over how to keep Atlantic City, New Jersey's only casino gambling resort town, from going bankrupt -- a scenario experts say would hurt the credit rating of municipalities across the state.
With the city weeks away from running out of money, Mayor Don Guardian traveled to Trenton on Monday to plead Atlantic City's case and ask the state for a bridge loan. But the Christie administration refused.
Hours later, Guardian announced the city will cease "non-essential" government operations from April 8 to May 2, when more tax money comes in.
He said essential services like police, fire, revenue collections, and some public works functions will continue. But no worker, the mayor said, will get paid in that time.
On Tuesday, Christie called a bridge loan nothing but a "Band-Aid" on a bigger problem that Atlantic City officials are "unwilling and incapable of fixing."
"I am no longer going to allow the taxpayers of New Jersey to be responsible," the governor said.
Instead, Christie once again called on the state Assembly to pass controversial legislation that would allow the state to take over key functions of Atlantic City's government -- including restructuring debt, breaking union contract, and selling off city assets. The governor and state Senate President Stephen Sweeney (D-Gloucester) have been pushing the takeover as the best way to reverse the city's issues.
Guardian and other local officials argue that the takeover goes too far and would amount to a "fascist dictatorship."
And while the Senate approved the takeover last week, Assembly Speaker Vincent Prieto has called for a new deal that protects union's collective bargaining rights. Christie reiterated Tuesday that he won't sign the legislation if it's changed.
"If what that means is that Atlantic City goes bankrupt, then go to Vincent Prieto's office and ask him why," the Republican governor said. "I am not going to negotiate with two sets of Democrats, quite frankly."
Prieto on Tuesday accused Christie of "not doing his job." He argued that current state law already allows the state to do many of the things the takeover seeks and that allowing Atlantic City to go bankrupt will cost the state more money.
"He is the executive," Prieto told NJ Advance Media. "At the end of the day, it's the governor that's going to be responsible."
The speaker also noted that the state has had some control in Atlantic City since 2010, when it took over the city's tourism district and placed a state monitor in city hall. It also installed an emergency manager there last year.
"There was a five-year plan," Prieto said. "That's what has failed. That's what we have to point the fingers at."
Guardian blames Christie for vetoing a rescue package in January that included changes he requested. The package would have given the city $33.5 million in aid that the state told the city to include in its current municipal budget. But Christie said he rejected it because the city hasn't done enough to fix its problems.
The Senate approved a new aid bill last week that essentially replaces the one Christie vetoed. But the governor said he won't sign it without the takeover.
A spokesman for Guardian's office declined comment until Thursday, when the mayor will hold a news conference on the issue at city hall.
Atlantic City has given billions of dollars to the state in casino tax revenue since gambling was legalized there in 1976. But four of the city's casinos have closed in recent years amid increasing competition from gambling halls in neighboring states, causing the city's casino tax revenue to be sliced in half.
Gov. Christie Announces Recovery Coaches Program While At Monmouth Medical Center 12 Gallery: Gov. Christie Announces Recovery Coaches Program While At Monmouth Medical Center
Brent Johnson may be reached at bjohnson@njadvancemedia.com. Follow him on Twitter @johnsb01. Find NJ.com Politics on Facebook. |
(CNN) Vice President Joe Biden in his first public remarks since Donald Trump became President-elect, said that there would be no "diminution" to the American-Israel alliance and advised that the American people at large are the "ultimate guarantor to every president on every transition."
Biden, sounding somewhat defeated, was speaking at a dinner for the World Jewish Congress -- a group representing Jewish communities in over 100 countries -- where he spoke of a Trump presidency specifically in terms of an American-Israel relationship, saying that there would be "no diminution of support" in the next administration.
"A number of my friends in the community are anxious about what it'll mean for America's commitment to Israel. I stand here to tell you that I have no doubt, none whatsoever, that in the Trump administration there will be no diminution of support as a consequence of this transition," Biden said.
"Even if a new administration were inclined to reduce a commitment, which it's not. Congress would never let it happen. The American people would never let it happen," he added.
In a video message to supporters in Israel last month, Trump said he would "make America and Israel safe again" and that his administration would "stand side-by-side with the Jewish people."
Read More |
/*
* Copyright 2010 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.migrationanalyzer.contributions.bytecode;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.InputStream;
import java.io.Reader;
import org.junit.Test;
import org.springframework.migrationanalyzer.analyze.fs.FileSystemEntry;
import org.springframework.migrationanalyzer.analyze.fs.FileSystemException;
import org.springframework.migrationanalyzer.analyze.support.AnalysisFailedException;
/**
 * Unit tests for {@code DelegatingByteCodeEntryAnalyzer}: verifies that the
 * analyzer delegates class files to the visitor produced by its factory, and
 * that directory entries are skipped (no visitor results gathered).
 */
public class DelegatingByteCodeEntryAnalyzerTests {
// Stub factory records whether the analyzer asked its visitor for results.
private final StubResultGatheringClassVisitorFactory factory = new StubResultGatheringClassVisitorFactory();
private final DelegatingByteCodeEntryAnalyzer analyzer = new DelegatingByteCodeEntryAnalyzer(this.factory);
@Test
public void analyze() throws AnalysisFailedException {
// A regular (non-directory) entry must be visited.
this.analyzer.analyze(new StubFileSystemEntry(false));
assertTrue(this.factory.getVisitor().getGetResultsCalled());
}
@Test
public void analyzeDirectory() throws AnalysisFailedException {
// Directory entries carry no bytecode and must be ignored.
this.analyzer.analyze(new StubFileSystemEntry(true));
assertFalse(this.factory.getVisitor().getGetResultsCalled());
}
/**
 * Minimal FileSystemEntry stub that serves this test class's own compiled
 * bytecode (read from the build output directory) as its input stream.
 */
private static class StubFileSystemEntry implements FileSystemEntry {
private final boolean isDirectory;
public StubFileSystemEntry(boolean isDirectory) {
this.isDirectory = isDirectory;
}
@Override
public InputStream getInputStream() {
try {
// Relies on the Maven/Gradle build having compiled this test class first.
return new FileInputStream(
"target/test-classes/org/springframework/migrationanalyzer/contributions/bytecode/DelegatingByteCodeEntryAnalyzerTests.class");
} catch (FileNotFoundException e) {
throw new FileSystemException(e);
}
}
@Override
public String getName() {
return "test.class";
}
@Override
public Reader getReader() {
// Byte-code entries are never read as character data in these tests.
return null;
}
@Override
public boolean isDirectory() {
return this.isDirectory;
}
}
}
|
Missouri Route 370
History
Construction on Route 370 began in the early 1990s. It was built to replace the section of St. Charles Rock Road, then designated as Route 115, west of Interstate 270.
Route 370 was considered to be Interstate 370 but AASHTO would only allow it if it was renumbered Interstate 870. MODOT did not want this because the designation of Interstate 870 was for the new southern freeway which is now designated as Interstate 255.
On December 16, 1992, the first section of the highway opened allowing direct access to St. Charles at Route 94 from I-270. It replaced the crumbling Old St. Charles Bridge that carried Route 115 traffic. On April 1, 1993, the road was designated as Route 370. In 1993, the Great Flood of 1993 surrounded much of the highway which is built on top of an embankment. As the flood waters surrounded the highway, many of the new exits had been shut down because the roads it connected to were underwater. Despite the flood, construction resumed on the highway until it was completed in 1996. |
Climate change is a crisis, and crises alter morality. Climate change is on track to cause the extinction of half the species on earth and, through a combination of droughts, famines, displaced people, and failed states and pandemics, the collapse of civilization within this century. If this horrific destructive force is to be abated, it will be due to the efforts of people who are currently alive. The future of humanity falls to us. This is an unprecedented moral responsibility, and we are by and large failing to meet it.
Indeed, most of us act as though we are not morally obligated to fight climate change, and those who do recognize their obligation are largely confused about how to meet it.
Crises alter morality; they alter what is demanded of us if we want to be considered good, honorable people. For example—having a picnic in the park is morally neutral. But if, during your picnic, you witness a group of children drowning and you continue eating and chatting, passively ignoring the crisis, you have become monstrous. A stark, historical example of crisis morality is the Holocaust—history judges those who remained passive during that fateful time. Simply being a private citizen (a “Good German”) is not considered honorable or morally acceptable in retrospect. Passivity, in a time of crisis, is complicity. It is a moral failure. Crises demand that we actively engage; that we rise to the challenge; that we do our best.
What is the nature of our moral obligation to fight climate change?
Our first moral obligation is to assess how we can most effectively help. While climate change is more frequently being recognized as a moral issue—the question, “How can a person most effectively engage in fighting climate change?” is rarely seriously considered or discussed. In times of crises, we can easily become overwhelmed with fear and act impetuously to discharge those feelings to “do something.” We may default to popular or well-known activism tactics, such as writing letters to our congress people or protesting fossil-fuel infrastructure projects without rigorously assessing if this is the best use of our time and talents.
"Our civilization, planet, and each of us individually are in an acute crisis, but we are so mired in individual and collective denial and distortion that we fail to see it clearly."
The question of “how can I best help” is particularly difficult for people to contemplate because climate change requires collective emergency action, and we live in a very individualistic culture. It can be difficult for an individual to imagine themselves as helping to create a social and political movement; helping the group make a shift in perspective and action. Instead of viewing themselves as possibly influencing the group, many people focus entirely on themselves, attempting to reduce their personal carbon footprint. This offers a sense of control and moral achievement, but it is illusory; it does not contribute (at least not with maximal efficacy) to creating the collective response necessary.
We need to mobilize, together. Climate change is a crisis, and it requires a crisis response. A wide variety of scientists, scholars, and activists agree: the only response that can save civilization is an all-out, whole-society mobilization. World War II provides an example of how the United States accomplished this in the past. We converted our industry from consumer-based to mission-based in a matter of months; oriented national and university research toward the mission, and mobilized the American citizenry toward the war effort in a wide variety of ways. Major demographic shifts were made to facilitate the mission, which was regarded as America’s sine qua non; for example, 10% of Americans moved to work in a “war job,” women worked in factories for the first time, and racial integration took steps forward. Likewise, we must give the climate effort everything we have, for if we lose, we may lose everything.
Where we are. While the need for a whole society and economy mobilization to fight climate change is broadly understood by experts, we are not close to achieving it as a society. Climate change ranks at the bottom of issues that citizens are concerned about. The climate crisis is rarely discussed in social or professional situations. This climate silence is mirrored in the media and political realm: for example, climate change wasn’t even mentioned in the 2012 presidential debates. When climate change is discussed, it is either discussed as a “controversy” or a “problem” rather than the existential emergency that it actually is. Our civilization, planet, and each of us individually are in an acute crisis, but we are so mired in individual and collective denial and distortion that we fail to see it clearly. The house is on fire, but we are still asleep, and our opportunity for being able to save ourselves is quickly going up in smoke.
Understanding the gap: The role of pluralistic ignorance. How can this be? How are we missing the crisis that will determine the future of our civilization and species? Dr. Robert Cialdini, social psychologist and author of Influence, describes the phenomena of “pluralistic ignorance,” which offers tremendous insight into this question—and into how we can beat the trance of denial and passivity.
In the following passage, Dr. Cialdini is not discussing climate change, but rather, the phenomena of emergencies (heart attacks, physical assaults, etc.) that are sometimes witnessed—and ignored— by dozens of people, especially in urban settings. These tragic instances are often ascribed to “apathy”—the hardening of city dwellers’ hearts toward each other. But scientific research shows something very different. Research shows that if one person witnesses an emergency, they will help in nearly 100% of instances. It is only in crowds—and in situations of uncertainty—that we have the capacity, even the tendency, to ignore an emergency.
Very often an emergency is not obviously an emergency. Is the man lying in the alley a heart-attack victim or a drunk sleeping one off? Are the sharp sounds from the street gunshots or truck backfires? Is the commotion next door an assault requiring the police or an especially loud marital spat where intervention would be inappropriate and unwelcome? What is going on? In times of such uncertainty, the natural tendency is to look around at the actions of others for clues. We can learn, from the way the other witnesses are reacting, whether the event is or is not an emergency. What is easy to forget, though, is that everybody else observing the event is likely to be looking for social evidence, too. And because we all prefer to appear poised and unflustered among others, we are likely to search for that evidence placidly, with brief, camouflaged glances at those around us. Therefore everyone is likely to see everyone else looking unruffled and failing to act. As a result, and by the principle of social proof, the event will be roundly interpreted as a nonemergency.
This, according to [social psychology researchers] Latané and Darley, is the state of pluralistic ignorance “in which each person decides that since nobody is concerned, nothing is wrong. Meanwhile, the danger may be mounting to the point where a single individual, uninfluenced by the seeming calm of others, would react.”
These paragraphs vividly illustrate how denial of the climate crisis is cocreated through the effect of pluralistic ignorance. We look around us and see people living their lives as normal. Our friends, coworkers, and family members are all going about their days as they always have. They are planning for the future. They are calm. They are not discussing climate change. So surely there is no emergency. Surely civilization is not in danger. Calm down, we tell ourselves, I must be the only one who is afraid.
This situation creates an intense amount of social pressure to act calm and not appear hysterical or “crazy.” We all want to fit in, to be well liked and to be considered “normal.” As of today, that means remaining silent on the effects of climate change, or responding with minimization, cynicism, or humor. It is taboo to discuss it as the crisis it is, a crisis that threatens all of us, and that we each have a moral obligation to respond to.
Of course, this pluralistic ignorance of the climate emergency is reinforced and bolstered through misinformation campaigns funded by fossil-fuel companies and the hostility of the few. “Better not bring up the climate crisis,” we tell ourselves, “It’s a controversial topic. Someone might really lose their temper.” However, the responsibility for pluralistic ignorance is widely shared. The vast majority of us—including those of us who believe in climate science and are terrified by climate change—are still, unwittingly, contributing to pluralistic ignorance.
How can we meet our moral obligation, and effectively fight climate change?
Certainty dispels pluralistic ignorance. Fortunately, the research on pluralistic ignorance and crisis response provides excellent guidance for how to overcome this trance of collective denial. The research shows that humans are actually strongly motivated to act in a crisis—as long as they are sure that there is a crisis and that they have a role in solving it. As Dr. Cialdini describes,
Groups of bystanders fail to help because the bystanders are unsure rather than unkind. They don’t help because they are unsure of whether an emergency actually exists and whether they are responsible for taking action. When they are sure of their responsibilities for intervening in a clear emergency, people are exceedingly responsive!
Dr. Cialdini provides a vivid example of how to apply this knowledge to a personal emergency—if you begin experiencing the symptoms of a stroke in a public place. As you start to feel ill, you slump against a tree, but no one approaches you to help. If people are worried about you, they look around, see everyone else acting calm, and decide that there is no emergency and no need to intervene. People are taking cues from each other to deny and ignore your crisis. How can you call forth the emergency intervention you need?
SCROLL TO CONTINUE WITH CONTENT Help Keep Common Dreams Alive Our progressive news model only survives if those informed and inspired by this work support our efforts
Stare, speak, and point directly at one person and no one else: “You, sir, in the blue jacket, I need help. Call an ambulance.” With that one utterance you should dispel all the uncertainties that might prevent or delay help. With that one statement you will have put the man in the blue jacket in the role of “rescuer.” He should now understand that emergency aid is needed; he should understand that he, not someone else, is responsible for providing the aid; and, finally, he should understand exactly how to provide it. All the scientific evidence indicates that the result should be quick, effective assistance.
Humans contain a great capacity to help each other, to dutifully respond to the needs of others, and to improve the world around us. We also have a need to feel good about ourselves, and that includes fulfilling our moral obligations. When it is clear there is an emergency, and we have a vital role in responding to it, we respond vigorously.
Climate change is a crisis, and it is your responsibility. Effectively intervening in pluralistic ignorance should be considered the primary goal of the climate movement. Climate change is a crisis that demands a massive collective response. This truth will become crystal clear if we overcome the forces of denial and pluralistic ignorance.
To call forth an emergency response from people, we have to put them in the role of rescuer. We must make clear that (1) an emergency is unfolding and (2) YOU have a critical role in responding to it.
Breaking from standard climate communications.
The environmental movement has not yet made either of these points clear. Indeed, the dominant school of thought in climate communications says we must underplay the severity of the climate crisis to avoid "turning people off," and we must emphasize individual reduction of emissions in order to provide people a sense of efficacy.
"Our moral obligation to fight climate change is to build a collective solution, not to purify ourselves as individual consumers."
Avoiding or finessing the frightening truths of climate change is not only ethically dubious, it is also bound for failure. If we want people to respond appropriately to the climate crisis, we have to level with them, and if we want to claim the moral high ground, we cannot distort the truth just because it’s easier.
A major reason that climate communications have been so milquetoast is that they have lacked a large-scale social movement and political strategy that individuals can be a meaningful part of. Instead, individuals have been addressed as “consumers” who should strive to minimize their individual carbon footprint or environmental impact. This approach is nonsocial and nonpolitical and casts individuals as perpetrators who should attempt to reduce the amount of harm they are causing, rather than rescuers who can make a meaningful contribution to a collective solution.
This point deserves emphasis, as it is so often misunderstood in our intensely individualistic culture. Our moral obligation to fight climate change is to build a collective solution, not to purify ourselves as individual consumers. This common response to the climate crisis can even be counterproductive in several ways: (1) it keeps the burden of responding to climate change on the individual, implicitly rejecting the idea of a collective response; (2) it perpetuates the message that there is no crisis by demanding only slight modifications to “business as usual”; and (3) it is often perceived as “holier than thou,” which can create the perception of barriers to entry to the movement. For example, a person might be deeply concerned about the climate crisis but feel they lack “standing” to voice their feelings because they eat meat or fly to Europe.
We must create an atmosphere in which active engagement in the climate crisis is considered a fundamental part of living a moral life. To accomplish this, we have to give people opportunities to be a meaningful part of the solution; we have to give them the opportunity to be rescuers.
The Pledge to Mobilize: A tool that creates rescuers.
I have worked for the past 18 months with The Climate Mobilization—a growing network of teammates, allies, and consultants to develop a tool intended to help individuals intervene in collective denial and pluralistic ignorance and call forth the all-out emergency response needed to protect civilization and the natural world.
The Pledge to Mobilize is a one-page document that any person can sign. The Pledge is several things at once— it is a public acknowledgment that the climate crisis threatens civilization, an endorsement of a World War II–scale mobilization that brings the United States to carbon neutrality by 2025 (by far the most ambitious emissions reduction goal proposed), and a set of personal commitments to help enact this mobilization. When someone signs, they pledge to (1) vote for candidates who have publicly endorsed the Climate Mobilization platform over those who have not; (2) only donate time and money to candidates who have endorsed the mobilization platform, and (3) mobilize their “skills, resources, and networks to spread the truth of climate change, and the hope of this movement, to others.”
The Pledge provides a bridge between individual and collective action—the actions that Pledgers agree to are all social and political in nature: things that one person can do to influence the group. Most important is personal commitment: #3— to spread the truth of climate change, and the Pledge itself. This is a strategy to reverse pluralistic ignorance and social pressure, which is supported by psychological research. People who take the Pledge start conversations with their friends and family about the climate crisis that include realistic solutions. This means that talking about climate change doesn’t mean just bearing bad news—but also showing the way forward—helping to channel the panic and despair that climate truth can evoke.
Since we started spreading the Pledge to Mobilize two-and-a-half months ago, we have seen many positive indicators of the Pledge’s ability to fight pluralistic ignorance and put individuals in the role of rescuers. Many (though not all) people who take the Pledge to Mobilize have continued to deepen their involvement from there, speaking more about climate change, reaching out to friends, family, and even strangers to discuss the topic. Mobilizers have educated themselves more deeply about climate change, fundraised for The Climate Mobilization, and taken on a variety of organizing and administrative tasks. Some have even gone as far as to rearrange or reduce their work schedules to have more time available to contribute. These are individuals who have left the fog of pluralistic ignorance, accepted the certainty that there is a crisis and that they have a moral obligation to act as a rescuer. Now they are attempting to spread that certainty to others.
Conclusion: Don’t wait for Pearl Harbor
On December 7, 1941, the United States experienced a sudden, collective exit from pluralistic ignorance. Before Pearl Harbor, the country was mired in the denial of isolationism. "The war doesn't concern us," we told ourselves. "Let's stay out of it." With one devastating surprise attack, that pluralistic ignorance transformed into a culture of mobilization, in which every citizen had a role to play in supporting the war effort—every American became a rescuer—a critical part of a shared mission.
Many scientists and scholars who recognize the need for a World War II–scale climate mobilization believe that some catastrophic event—a super-storm, a drought, or an economic collapse, will similarly jolt us out of our collective climate denial. There is reason to doubt this, however, given how much more complicated climate change is than a surprise attack. Further, we have a moral obligation to achieve this collective awakening as soon as possible.
Talking about the climate crisis candidly and our moral obligation to stand against it— whether using the Pledge to Mobilize, or not—helps prepare people to see the crisis. Conversations that seem unsuccessful may alter how the person processes climate-related disasters in the future, or make them more likely to seek out or absorb information about the crisis.
Give it a try. Talk with five people about the climate crisis this week. Talk about how afraid you are, and how you feel it is a moral obligation to spread the fact that we are in a crisis. Consider taking the Pledge to Mobilize—it will provide you with a tool to help you intervene in pluralistic ignorance, as well as a community of individuals who are committed to this approach. It takes courage to face climate change honestly, and discussing it with other people puts you at risk of rejection and hostility. But morality demands we do what is right, not what is easy. We must rise to the challenge of our time, together. |
One case of intrahepatic cholangiocarcinoma amenable to resection after radioembolization. We report the case of a 57-year-old man who was diagnosed with a large unresectable cholangiocarcinoma associated with 2 satellite nodules and without clear margins with the right hepatic vein. Despite 4 cycles of GEMOX (stopped due to a hypertransaminasemia believed to be due to gemcitabine) and 4 cycles of FOLFIRINOX, the tumor remained stable and continued to be considered unresectable. Radioembolization (resin microspheres, SIRS-spheres) targeting the left liver (474 MBq) and segment IV (440 MBq) was performed. This injection was very well tolerated, and 4 more cycles of FOLFIRINOX were given while waiting for radioembolization efficacy. On computed tomography scan, a partial response was observed; the tumor was far less hypervascularized, and a margin was observed between the tumor and the right hepatic vein. A left hepatectomy enlarged to segment VIII was performed. On pathological exam, most of the tumor was acellular, with dense fibrosis around visible microspheres. Viable cells were observed only at a distance from beads. Radioembolization can be useful in the treatment of cholangiocarcinoma, allowing in some cases a secondary resection. INTRODUCTION Intrahepatic cholangiocarcinoma (ICC) is the second most common primary liver cancer after hepatocellular carcinoma, with approximately 10000 new cases/year in Europe, and exhibits a dismal prognosis. The incidence of ICC is growing in many countries. Cure can be expected only from surgical resection and in the early stages. However, the vast majority of patients presents with advanced disease or experience tumor recurrence after initial resection. In locally advanced or metastatic patients, systemic chemotherapy combining cisplatin and gemcitabine is the current gold standard, but tumor response and overall survival remain poor. 
Intra-arterial treatment in locally advanced unresectable cases seems promising in cases of liver-confined disease. Radioembolization with 90Y-loaded beads has been reported in a few studies as efficient in ICC. We present the case of a locally advanced ICC receiving systemic chemotherapy without major efficacy, followed by treatment with 90Y radioembolization (resin microspheres) that permitted resection with major (near complete) histologic response related histologically to the radioembolization. CASE REPORT A 57-year-old man without any past history presented with abdominal pain on his right side in December 2011. Ultrasound (US) and computed tomography (CT) scan demonstrated a large tumor on the median part of the liver without any abdominal lymph nodes or extrahepatic tumors. Alpha-fetoprotein levels were 29 ng/mL (ULN = 5 mg/mL), and carcinoembryonic antigen and carbohydrate antigen 19-9 serum levels were normal. On magnetic resonance imaging, an 11-cm nodule with two satellite tumors was identified. The main tumor was invading the left portal pedicle and the left and median supra-hepatic veins and exhibited no security margin with the right supra-hepatic vein ( Figure 1A) and the right hepatic artery. Colonoscopy and gastroscopy were normal. Pathological analysis of the liver biopsy confirmed an ICC; the surrounding liver parenchyma was normal. The patient was treated with a GEMOX regimen (gemcitabine 1000 mg/m D1 and oxaliplatin 100 mg/m D2) every 2 wk. After the fourth cycle, the appearance of hepatic cytolysis likely due to gemcitabine led us to stop this treatment and to shift to a FOLFIRINOX regimen. After 4 cycles, the CT scan revealed a stable disease, and resection was considered impossible. Local hepatic treatment with radioembolization was subsequently attempted. A biodistribution analysis of 99 mTc macroaggregated albumin injected in the target arteries did not reveal any lung shunting of extrahepatic uptake. 
The therapeutic injection was performed on 23 rd July 2012 with two selective injections: one in the left hepatic artery of 474 Mbq of 90Y-resin microspheres (SIRS-Spheres ®, Sirtex Medical, Lane Cove, Australia) and the other of 440 Mbq in the segment IV artery arising from the right hepatic artery. Dosimetry calculations (BSA method) corresponded to 120 Gy delivered to the tumor, 7 Gy to the non-tumorous liver and 4 Gy to the lungs. No side effects were noted, and 4 more cycles of FOLFIRINOX were administered. In September 2012 (2 mo after the radioembolization), the CT scan revealed a partial response ( Figure 1B), but at the arterial phase, the hypervascular component of the tumor had clearly declined, and a margin between the tumor and the right hepatic vein could be observed. The volume of the left lobe only slightly increased after radioembolization from 1323 mL up to 1420 mL. A left hepatectomy enlarged to segment Ⅷ was performed on October 30 th, 2012 without major difficulty. The pathological examination revealed that most of the tumor was composed of acellular, dense, collagen fibrosis with many beads included. The center of the tumor was entirely fibrotic; at the periphery of the tumor (Figure 2), some neoplastic cords could be identified in the fibrosis at a distance from beads; these cords were largely unicellular and sometimes organized around a glandular cavity. This response was classified as a major tumoral regression with a R0 resection. The patient was alive without evidence of recurrence 1 year after the surgery. DISCUSSION This case of partial radiologic tumor response allowing a complete R0 tumor resection and major histological tumor response after chemotherapy and one single radioembolization illustrates the usefulness of multidisciplinary approaches in locally advanced liver tumors, particularly ICC, and the efficacy of radioembolization. 
In this case, pathological examination revealed a close relationship between the presence of beads and severe necrosis/fibrosis; conversely viable tumor cells were only observed at the periphery of the tumor, at considerable distances from beads. Radioembolization involves the injection of microspheres loaded with 90Y into the feeding artery. These spheres had a diameter ranging from 25 to 60 m. Currently, two different types of microspheres are available, glass (TheraSphere ®, Nordion, Canada) and resin (SIR Sphere, SIRTEX, Australia), and these differ in size and activity per sphere, which is important for glass spheres but less important for resin microspheres. This treatment achieves the microembolization of tumorous vessels and delivers local irradiation; 90Y is a very energetic isotope with a cytotoxic range of several millimeters (median 2.5 mm) and a short half-life of 64.2 h. This isotope is only a beta emitter, and patients can be discharged the same day. This treatment is a therapeutic option in hepatocellular carcinoma and in hepatic colorectal metastases. Largescale randomized trials are ongoing to determine the best place for these loco-regional treatments. Few data are available on 90Y radioembolization in advanced cholangiocarcinoma ; both resin and glass microspheres have been used. All series are retrospective and have confirmed tolerance to this therapeutic option. The response rate is difficult to summarize, as some series used the classical Response Evaluation Criteria In Solid Tumors (RECIST)/WHO criteria, whereas others used EASL criteria or mRECIST more "logically" with this approach to measure the vascularized part of the tumor. The response rate was approximately 25%-30% using WHO or RECIST and higher (73%) with EASL. In one series, 5 of 46 patients benefited from downstaging from an R0 surgery, as in our case. 
In most cases, the future remnant liver volume increased after radioembo-lization of the contralateral lobe ; this increase was not obvious in our case. Therefore, in unresectable but localized ICC, radioembolization can be considered a useful tool that results in curative resection in some cases. This option should be considered in some borderline cases for surgical resection. Case characteristics Authors report the case of a 57-year-old man who was diagnosed with a large unresectable cholangiocarcinoma associated with 2 satellite nodules and without clear margins with the right hepatic vein. Clinical diagnosis Despite 4 cycles of GEMOX (stopped due to a hypertransaminasemia believed to be due to gemcitabine) and 4 cycles of FOLFIRINOX, the tumor remained stable and continued to be considered unresectable. Radioembolization (resin microspheres, SIRS-spheres ® ) targeting the left liver (474 MBq) and segment IV (440 MBq) was performed. Laboratory diagnosis On computed tomography scan, a partial response was observed; the tumor was far less hypervascularized, and a margin was observed between the tumor and the right hepatic vein. A left hepatectomy enlarged to segment VIII was performed. Treatment Radioembolization can be useful in the treatment of cholangiocarcinoma, allowing in some cases a secondary resection. Peer review Interesting case report dealing with the value of radioembolization in the treatment of initially unresectable CCC. Useful aspect of a multimodal pathway. |
Former Arizona Sheriff Joe Arpaio has said he is 'seriously, seriously, seriously considering running for the US Senate.'
The 85-year-old said he was not interested in running for Trent Franks' congressional seat when the Republican resigns in January after asking staffers to be surrogate mothers for his child.
But he admitted retiring Senator Jeff Flake's seat has caught his eye.
Former Arizona Sheriff Joe Arpaio (pictured this summer) has said he is 'seriously, seriously, seriously considering running for the US Senate.'
While buying meat at a deli counter on Thursday, he told a Daily Beast reporter: 'No, I would not consider Franks' seat, but I am considering running for the Senate, Flake's seat. I feel like I just gave you a little scoop there.'
He faces stiff competition for the seat in the form of Kelli Ward, a former state senator who already has significant financial backing for a re-run.
Arpaio was dubbed 'America's toughest sheriff' for his crackdown on illegal immigrants during his 24 years in charge in Maricopa County, Arizona.
In August President Donald Trump pardoned the controversial sheriff less than a month after he was convicted of criminal contempt in a case involving racial profiling.
Trump signed the pardon for the 85-year-old Arpaio citing his long history of public service.
Arpaio, who campaigned for Trump in 2016, was convicted by a judge who ruled he had willfully violated a 2011 injunction barring his officers from stopping and detaining Latino motorists solely on suspicion that they were in the country illegally.
The former sheriff was facing up to six months in prison after he admitted to inadvertently disobeying the court order.
But he said the prosecution was a politically motivated attempt by the Obama administration to undermine his re-election bid.
President Donald Trump has pardoned controversial Arizona Sheriff Joe Arpaio (above) who was convicted of federal contempt earlier this year
Trump said in the statement pardoning Arpaio: 'Arpaio’s life and career, which began at the age of 18 when he enlisted in the military after the outbreak of the Korean War, exemplify selfless public service
'Throughout his time as Sheriff, Arpaio continued his life’s work of protecting the public from the scourges of crime and illegal immigration.'
'Sheriff Joe Arpaio is now eighty-five years old, and after more than fifty years of admirable service to our Nation, he is a worthy candidate for a Presidential pardon.'
Arpaio, who lost a bid for re-election in Arizona’s Maricopa County in November after 24 years in office, was known for his crackdown on undocumented immigrants and investigating unfounded Trump-supported claims questioning former President Barack Obama’s citizenship.
Before Trump granted the pardon, the American Civil Liberties Union, which sought the court injunction against Arpaio, said it would be 'a presidential endorsement of racism.' |
// TestSiegeCommonResults - text common results interface
func TestSiegeCommonResults(t *testing.T) {
var tool SiegeTool
cfg.Cfg.Verbose = true
config := &cfg.Config{}
config.Siege.Concurrent = 1
config.Siege.Time = 1
tool = SiegeTool{&config.Siege}
result, err := tool.BenchCommand("test")
if err != nil {
t.Fatal(err)
}
_ = result.Command()
_ = result.Params()
data := []byte("")
result.Parse(data)
data = []byte(SIEGE_RESULT)
result.Parse(data)
} |
/*
* Copyright (C) 2013-2013 <NAME>
* Copyright (C) 2013-2013 Parrot S.A.
*
* Licensed under the Apache License, Version 2.0 (the "License");
*
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.parrot.docdown.generator.html;
import com.parrot.docdown.data.page.IDocPage;
import com.parrot.docdown.generator.AssetsDir;
import com.parrot.docdown.generator.DefaultGenerator;
import com.parrot.docdown.generator.PageRenderer;
import org.rendersnake.DocType;
import org.rendersnake.HtmlCanvas;
import org.rendersnake.Renderable;
import java.io.IOException;
import static org.rendersnake.HtmlAttributesFactory.charset;
import static org.rendersnake.HtmlAttributesFactory.id;
/**
 * Renders a complete HTML documentation page: HTML5 doctype, a head that pulls
 * in the shared scripts and stylesheets, and a two-pane body (navigation side
 * bar plus main area), each pane built from the renderables supplied at
 * construction time.
 */
public class PageRenderable implements Renderable {

    private final AssetsDir assetDir;
    private final Renderable navHeadRenderable;
    private final Renderable mainHeadRenderable;
    private final Renderable navContentRenderable;
    private final Renderable mainContentRenderable;

    public PageRenderable(DefaultGenerator generator, Renderable navHeadRenderable, Renderable mainHeadRenderable,
                          Renderable navContentRenderable, Renderable mainContentRenderable) {
        this.assetDir = generator.getAssertsDir();
        this.navHeadRenderable = navHeadRenderable;
        this.mainHeadRenderable = mainHeadRenderable;
        this.navContentRenderable = navContentRenderable;
        this.mainContentRenderable = mainContentRenderable;
    }

    @Override
    public void renderOn(HtmlCanvas html) throws IOException {
        IDocPage page = PageRenderer.getPage(html);
        // Relative path from the generated page to the shared assets directory.
        String assetRelDir = page.getReferenceTo(assetDir.getPath()) + "/";

        // Doctype and opening <html>.
        html.render(DocType.HTML5).html();

        // Head: title, charset, shared scripts and stylesheets.
        html.head();
        html.title().content(page.getTitle());
        html.meta(charset("utf-8"));
        html.macros().javascript(assetRelDir + "jquery-1.10.2.js");
        html.macros().javascript(assetRelDir + "jquery-ui-1.10.4.js");
        html.macros().javascript(assetRelDir + "highlight.js");
        html.macros().stylesheet(assetRelDir + "highlight-default.css");
        html.macros().javascript(assetRelDir + "quickdoc.js");
        html.macros().stylesheet(assetRelDir + "quickdoc.css");
        html.script().content("hljs.initHighlightingOnLoad();");
        html._head();

        // Body layout.
        html.body();

        // Navigation pane.
        html.div(id("side"));
        html.div(id("header")).render(navHeadRenderable)._div();
        html.div(id("content")).render(navContentRenderable)._div();
        html._div();

        // Main pane.
        html.div(id("main"));
        html.div(id("header")).render(mainHeadRenderable)._div();
        html.div(id("content")).render(mainContentRenderable)._div();
        html._div();

        // Kick off the page initialization script (content not escaped).
        html.script().content("init();", false);

        // Close body and document.
        html._body()._html();

        // The canvas owns the writer for this page; release it now.
        html.getOutputWriter().close();
    }
}
|
I recently got the chance to attend JAOO in Aarhus, Denmark. Besides learning a great amount about various approaches to solving hard problems that we all face as programmers (regardless of the stack we spend most of our time developing on), I got to meet so many interesting people from all walks of programmer life.
Joe Armstrong is the principle inventor of the Erlang programming Language and coined the term "Concurrency Oriented Programming". He has worked for Ericsson where he developed Erlang and was chief architect of the Erlang/OTP system.
In 1998 he left Ericsson to form Bluetail, a company which developed all its products in Erlang. In 2003 he obtained his PhD from the Royal Institute of Technology, Stockholm. The title of his thesis was "Making reliable distributed systems in the presence of software errors." Today he works for Ericsson.
He is author of the book Software for a concurrent world: (Pragmatic Bookshelf - July 15, 2007). He is married with 2 children, 2 cats and 4 motorcycles and would very much like to sell his Royal Enfield Bullet and replace it with a Norton Commando.
Mads Torgersen is a Senior Program Manager in the C# group and has been working on the design of LINQ and other new C# language features. Before joining Microsoft, Mads worked as an associate professor in computer science at the university of Aarhus, where he was part of the group that developed wildcards for Java generics.
Here, in part one of a two part interview, Joe and Mads discuss the pros and cons of object oriented programming, the new spotlight on concurrency and the future of languages (it should come as no surprise, for those of you who understand Erlang, that Joe is not a big time proponent of OO...). Erik Meijer, who is listening into the conversation will appear at random intervals to add his usual brilliant perspective.
This is a fantastic discussion. Listen in and learn from some of programming's masters. This was a real treat for me and one of the highlights of my time at JAOO. |
/**
* Copyright (c) 2011-2012, IBSOFT.
* All rights reserved.
*/
package br.com.ibsoft.f1.rs.client.bean;
import java.util.ArrayList;
import java.util.List;
import org.f1.entity.Equipe;
import org.jboss.resteasy.client.ClientRequest;
import org.jboss.resteasy.client.ClientResponse;
import org.jboss.resteasy.util.GenericType;
import br.com.ibsoft.f1.rs.client.EquipeResourceClient;
import br.com.ibsoft.f1.rs.client.F1HttpResourceClient;
/**
 * REST client implementation for the {@code Equipe} resource, built on top of
 * RESTEasy's {@code ClientRequest}/{@code ClientResponse} API. Only
 * {@link #listar(Integer, Integer)} is implemented; the remaining operations
 * are generated stubs returning {@code null}.
 *
 * @author lourenco
 *
 * @since v1.0.0
 */
public class EquipeResourceClientBean extends F1HttpResourceClient implements EquipeResourceClient {

    private static final long serialVersionUID = -4076277045120633831L;

    public EquipeResourceClientBean(String baseUrl) {
        super(baseUrl);
    }

    /**
     * Issues a GET to {@code /anuncios} and unmarshals the body as a list of
     * {@link Equipe} when the server answers HTTP 200; otherwise returns an
     * empty list.
     * <p>
     * NOTE(review): the {@code first}/{@code max} pagination parameters are
     * currently ignored — confirm whether the endpoint supports them.
     */
    @Override
    public List<Equipe> listar(Integer first, Integer max) throws Exception {
        ClientRequest request = createRequest("/anuncios");
        ClientResponse<List<Equipe>> response = request.get(new GenericType<List<Equipe>>() {
        });
        List<Equipe> equipes = new ArrayList<Equipe>(0);
        System.out.println("RESPONSE " + response.getStatus());
        if (response.getStatus() == 200) {
            equipes = response.getEntity();
        }
        return equipes;
    }

    /** Not implemented yet; always returns {@code null}. */
    @Override
    public Equipe buscarEquipePor(Long id) {
        // TODO Auto-generated method stub
        return null;
    }

    /** Not implemented yet; always returns {@code null}. */
    @Override
    public Equipe salvar(Equipe equipe) {
        // TODO Auto-generated method stub
        return null;
    }

    /** Not implemented yet; always returns {@code null}. */
    @Override
    public Equipe atualizar(Equipe equipe) {
        // TODO Auto-generated method stub
        return null;
    }

    /** Not implemented yet; no-op. */
    @Override
    public void deletar(Long id) {
        // TODO Auto-generated method stub
    }
}
|
Case for Diagnosis. The eruption and the long interval between its appearance and that of the primary sore, with the total absence of any history of reinfection, as well as the improbability of this event, made it almost out of the question to accept syphilis as a possible explanation of the present eruption.
//
// rs.c
// Radar Simulation Framework
//
// Created by <NAME>.
// Copyright (c) 2015-2016 <NAME>. All rights reserved.
//
#include "rs.h"
#include "rs_priv.h"
// The block declaration is automatically generated by XCode
#if defined (_USE_GCL_)
#include "rs.cl.h"
#endif
#define RS_INDENT " "
#define RS_FMT "%-35s"
#define RS_FMT2 "%-14s"
#define RS_SHOW_DIV 9
#define CHECK_CL_CREATE_KERNEL \
if (ret != CL_SUCCESS) { \
rsprint("ERROR: Could not create OpenCL kernel. ret = %d\n", ret); \
clReleaseProgram(C->prog); \
clReleaseContext(C->context); \
return; \
}
#define CHECK_CL_CREATE_BUFFER \
if (ret != CL_SUCCESS) { \
rsprint("ERROR: clCreateBuffer() failed. ret = %d\n", ret); \
return; \
}
#pragma mark -
// These implementations are very inefficient on CPU; They are coded this way so comparison with the GPU kernel codes can be made easily.
cl_float4 complex_multiply(const cl_float4 a, const cl_float4 b) {
return (cl_float4){{
a.s0 * b.s0 - a.s1 * b.s1,
a.s1 * b.s0 + a.s0 * b.s1,
a.s2 * b.s2 - a.s3 * b.s3,
a.s3 * b.s2 + a.s2 * b.s3
}};
}
// Divide two packed complex pairs: (.s0,.s1) and (.s2,.s3) each hold
// (real, imag). Uses the standard conjugate-over-magnitude formulation.
cl_float4 complex_divide(const cl_float4 a, const cl_float4 b) {
    const float mag_lo = b.s0 * b.s0 + b.s1 * b.s1;
    const float mag_hi = b.s2 * b.s2 + b.s3 * b.s3;
    const float re_lo = (a.s0 * b.s0 + a.s1 * b.s1) / mag_lo;
    const float im_lo = (a.s1 * b.s0 - a.s0 * b.s1) / mag_lo;
    const float re_hi = (a.s2 * b.s2 + a.s3 * b.s3) / mag_hi;
    const float im_hi = (a.s3 * b.s2 - a.s2 * b.s3) / mag_hi;
    return (cl_float4){{re_lo, im_lo, re_hi, im_hi}};
}
cl_double4 double_complex_multiply(const cl_double4 a, const cl_double4 b) {
return (cl_double4){{
a.s0 * b.s0 - a.s1 * b.s1,
a.s1 * b.s0 + a.s0 * b.s1,
a.s2 * b.s2 - a.s3 * b.s3,
a.s3 * b.s2 + a.s2 * b.s3
}};
}
// Double-precision variant of complex_divide(): divides two packed complex
// pairs, (.s0,.s1) and (.s2,.s3), via conjugate over squared magnitude.
cl_double4 double_complex_divide(const cl_double4 a, const cl_double4 b) {
    const double mag_lo = b.s0 * b.s0 + b.s1 * b.s1;
    const double mag_hi = b.s2 * b.s2 + b.s3 * b.s3;
    const double re_lo = (a.s0 * b.s0 + a.s1 * b.s1) / mag_lo;
    const double im_lo = (a.s1 * b.s0 - a.s0 * b.s1) / mag_lo;
    const double re_hi = (a.s2 * b.s2 + a.s3 * b.s3) / mag_hi;
    const double im_hi = (a.s3 * b.s2 - a.s2 * b.s3) / mag_hi;
    return (cl_double4){{re_lo, im_lo, re_hi, im_hi}};
}
// OpenCL program-build completion callback; logs only when user_data was
// supplied by the caller.
void pfn_prog_notify(cl_program program, void *user_data) {
    if (user_data == NULL) {
        return;
    }
    rsprint("Program %p returned %p (via pfn_prog_notify)\n", program, user_data);
}
// OpenCL context-error callback: forwards the runtime's error string to stderr
// with a timestamp. private_info/cb (binary payload) and user_data are unused.
void pfn_notify(const char *errinfo, const void *private_info, size_t cb, void *user_data) {
    fprintf(stderr, "%s : RS : %s (via pfn_notify)\n", now(), errinfo);
}
// CL_DEVICE_TYPE_GPU
void get_device_info(cl_device_type device_type, cl_uint *num_devices, cl_device_id *devices, cl_uint *num_cus, cl_uint *vendors, cl_int detail_level) {
int i = 0, j = 0;
cl_uint num_platforms = 0;
cl_uint platform_num_devices = 0;
*num_devices = 0;
cl_platform_id platforms[RS_MAX_GPU_PLATFORM];
char buf_char[RS_MAX_STR];
cl_uint buf_uint;
cl_ulong buf_ulong;
int s = 0;
char str[RS_MAX_STR];
CL_CHECK(clGetPlatformIDs(RS_MAX_GPU_PLATFORM, platforms, &num_platforms));
if (detail_level)
s += snprintf(str + s, RS_MAX_STR, "* Number of OpenCL platforms: %d\n", num_platforms);
for (; i < num_platforms; i++) {
CL_CHECK(clGetDeviceIDs(platforms[i], device_type, RS_MAX_GPU_DEVICE - *num_devices, &devices[*num_devices], &platform_num_devices));
*num_devices += platform_num_devices;
if (*num_devices >= RS_MAX_GPU_DEVICE) {
fprintf(stderr, "%s : RS : Sweet. A lot of devices found. Upgrade! Upgrade! Upgrade! \n", now());
*num_devices = RS_MAX_GPU_DEVICE;
return;
}
if (detail_level) {
s += snprintf(str + s, RS_MAX_STR, " > PLATFORM %d:\n", i);
CL_CHECK(clGetPlatformInfo(platforms[i], CL_PLATFORM_NAME, RS_MAX_STR, buf_char, NULL));
s += snprintf(str + s, RS_MAX_STR, " * NAME = %s\n", buf_char);
if (detail_level > 1) {
CL_CHECK(clGetPlatformInfo(platforms[i], CL_PLATFORM_VENDOR, RS_MAX_STR, buf_char, NULL));
s += snprintf(str + s, RS_MAX_STR, " * VENDOR = %s\n", buf_char);
}
CL_CHECK(clGetPlatformInfo(platforms[i], CL_PLATFORM_PROFILE, RS_MAX_STR, buf_char, NULL));
s += snprintf(str + s, RS_MAX_STR, " * PROFILE = %s\n", buf_char);
CL_CHECK(clGetPlatformInfo(platforms[i], CL_PLATFORM_VERSION, RS_MAX_STR, buf_char, NULL));
s += snprintf(str + s, RS_MAX_STR, " * VERSION = %s\n", buf_char);
if (detail_level > 2) {
CL_CHECK(clGetPlatformInfo(platforms[i], CL_PLATFORM_EXTENSIONS, RS_MAX_STR, buf_char, NULL));
if (strlen(buf_char)) {
char *b = buf_char;
while (1) {
char *e = strchr(b, ' ');
if (e) {
*e = '\0';
}
if (b == buf_char) {
s += snprintf(str + s, RS_MAX_STR, " * EXTENSIONS = %s\n", b);
} else {
s += snprintf(str + s, RS_MAX_STR, " %s\n", b);
}
if (e) {
b = e + 1;
} else {
break;
}
}
}
}
s += snprintf(str + s, RS_MAX_STR, " * Number of OpenCL devices = %d\n", *num_devices);
for (j = 0; j < platform_num_devices; j++) {
s += snprintf(str + s, RS_MAX_STR, " > DEVICE %d:\n", j);
CL_CHECK(clGetDeviceInfo(devices[j], CL_DEVICE_NAME, RS_MAX_STR, buf_char, NULL));
s += snprintf(str + s, RS_MAX_STR, " - " RS_FMT " = %s\n", "CL_DEVICE_NAME", buf_char);
CL_CHECK(clGetDeviceInfo(devices[j], CL_DEVICE_VENDOR, RS_MAX_STR, buf_char, NULL));
if (strcasestr(buf_char, "intel")) {
vendors[j] = RS_GPU_VENDOR_INTEL;
} else if (strcasestr(buf_char, "nvidia")) {
vendors[j] = RS_GPU_VENDOR_NVIDIA;
} else if (strcasestr(buf_char, "amd")) {
vendors[j] = RS_GPU_VENDOR_AMD;
} else {
vendors[j] = RS_GPU_VENDOR_UNKNOWN;
}
s += snprintf(str + s, RS_MAX_STR, " - " RS_FMT " = %s (%d)\n", "CL_DEVICE_VENDOR", buf_char, vendors[j]);
CL_CHECK(clGetDeviceInfo(devices[j], CL_DEVICE_GLOBAL_MEM_SIZE, sizeof(buf_ulong), &buf_ulong, NULL));
s += snprintf(str + s, RS_MAX_STR, " - " RS_FMT " = %s B\n", "CL_DEVICE_GLOBAL_MEM_SIZE", commaint(buf_ulong));
CL_CHECK(clGetDeviceInfo(devices[j], CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(buf_ulong), &buf_ulong, NULL));
s += snprintf(str + s, RS_MAX_STR, " - " RS_FMT " = %s B\n", "CL_DEVICE_MAX_MEM_ALLOC_SIZE", commaint(buf_ulong));
CL_CHECK(clGetDeviceInfo(devices[j], CL_DEVICE_MAX_COMPUTE_UNITS, sizeof(buf_uint), &num_cus[j], NULL));
s += snprintf(str + s, RS_MAX_STR, " - " RS_FMT " = %u\n", "CL_DEVICE_MAX_COMPUTE_UNITS", (unsigned int)num_cus[j]);
if (detail_level > 1) {
CL_CHECK(clGetDeviceInfo(devices[j], CL_DEVICE_VERSION, RS_MAX_STR, buf_char, NULL));
s += snprintf(str + s, RS_MAX_STR, " - " RS_FMT " = %s\n", "CL_DEVICE_VERSION", buf_char);
CL_CHECK(clGetDeviceInfo(devices[j], CL_DRIVER_VERSION, RS_MAX_STR, buf_char, NULL));
s += snprintf(str + s, RS_MAX_STR, " - " RS_FMT " = %s\n", "CL_DRIVER_VERSION", buf_char);
CL_CHECK(clGetDeviceInfo(devices[j], CL_DEVICE_MAX_CLOCK_FREQUENCY, sizeof(buf_uint), &buf_uint, NULL));
s += snprintf(str + s, RS_MAX_STR, " - " RS_FMT " = %s MHz\n", "CL_DEVICE_MAX_CLOCK_FREQUENCY", commaint(buf_uint));
if (detail_level > 2) {
size_t work_sizes[3];
clGetDeviceInfo(devices[j], CL_DEVICE_MAX_WORK_ITEM_SIZES, sizeof(work_sizes), &work_sizes, NULL);
s += snprintf(str + s, RS_MAX_STR, " - " RS_FMT " = %zu / %zu / %zu\n", "CL_DEVICE_MAX_WORK_ITEM_SIZES", work_sizes[0], work_sizes[1], work_sizes[2]);
clGetDeviceInfo(devices[j], CL_DEVICE_MAX_WORK_GROUP_SIZE, sizeof(size_t), work_sizes, NULL);
s += snprintf(str + s, RS_MAX_STR, " - " RS_FMT " = %zu\n", "CL_DEVICE_MAX_WORK_GROUP_SIZE", work_sizes[0]);
clGetDeviceInfo(devices[j], CL_DEVICE_LOCAL_MEM_SIZE, sizeof(cl_ulong), &buf_ulong, NULL);
s += snprintf(str + s, RS_MAX_STR, " - " RS_FMT " = %s B\n", "CL_DEVICE_LOCAL_MEM_SIZE", commaint(buf_ulong));
clGetDeviceInfo(devices[j], CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE, sizeof(cl_ulong), &buf_ulong, NULL);
s += snprintf(str + s, RS_MAX_STR, " - " RS_FMT " = %s B\n\n", "CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE", commaint(buf_ulong));
clGetDeviceInfo(devices[j], CL_DEVICE_IMAGE2D_MAX_WIDTH, sizeof(size_t), work_sizes, NULL);
s += snprintf(str + s, RS_MAX_STR, " - " RS_FMT " " RS_FMT2 " %7s\n", "CL_DEVICE_IMAGE <dim>", "2D_MAX_WIDTH", commaint(work_sizes[0]));
clGetDeviceInfo(devices[j], CL_DEVICE_IMAGE2D_MAX_HEIGHT, sizeof(size_t), work_sizes, NULL);
s += snprintf(str + s, RS_MAX_STR, " " RS_FMT " " RS_FMT2 " %7s\n", "", "2D_MAX_HEIGHT", commaint(work_sizes[0]));
clGetDeviceInfo(devices[j], CL_DEVICE_IMAGE3D_MAX_WIDTH, sizeof(size_t), work_sizes, NULL);
s += snprintf(str + s, RS_MAX_STR, " " RS_FMT " " RS_FMT2 " %7s\n", "", "3D_MAX_WIDTH", commaint(work_sizes[0]));
clGetDeviceInfo(devices[j], CL_DEVICE_IMAGE3D_MAX_HEIGHT, sizeof(size_t), work_sizes, NULL);
s += snprintf(str + s, RS_MAX_STR, " " RS_FMT " " RS_FMT2 " %7s\n", "", "3D_MAX_HEIGHT", commaint(work_sizes[0]));
clGetDeviceInfo(devices[j], CL_DEVICE_IMAGE3D_MAX_DEPTH, sizeof(size_t), work_sizes, NULL);
s += snprintf(str + s, RS_MAX_STR, " " RS_FMT " " RS_FMT2 " %7s\n\n", "", "3D_MAX_DEPTH", commaint(work_sizes[0]));
}
}
} // for (; j < platform_num_devices; j++)
} else {
for (; j < platform_num_devices; j++)
CL_CHECK(clGetDeviceInfo(devices[j], CL_DEVICE_MAX_COMPUTE_UNITS, sizeof(buf_uint), &num_cus[j], NULL));
}
} // for (; i < num_platforms; i++)
printf("%s", str);
}
// Read one or more OpenCL kernel source files (NULL- or ""-terminated varargs
// list of filenames) into a shared static buffer, recording a pointer to each
// text line in src_ptr[]. Lines are kept NUL-separated inside the buffer so
// each src_ptr[i] is an independent C string. Returns the number of lines
// collected, or 0 when the combined sources exceed the buffer limits.
// NOTE: the static buffer makes this function non-reentrant.
cl_uint read_kernel_source_from_files(char *src_ptr[], ...) {
    static char char_buf[RS_MAX_KERNEL_SRC] = "";
    cl_uint count = 0, len = 0;
    va_list files;
    va_start(files, src_ptr);
    char *filename = va_arg(files, char *);
    while (filename != NULL && strlen(filename) > 0) {
#ifdef DEBUG_KERNEL_READ
        rsprint("src '%s' (%d)\n", filename, (int)strlen(filename));
#endif
        // Read in the kernel source
        FILE *fid = fopen(filename, "r");
        if (fid == NULL) {
            rsprint("ERROR: Unable to open kernel source %s.\n", filename);
            break;
        }
        // Guard on the running total 'len': the previous check used
        // strlen(char_buf), which only measures up to the first stored line's
        // NUL terminator and therefore never engaged.
        while (!feof(fid) && len < RS_MAX_KERNEL_SRC && count < RS_MAX_KERNEL_LINES) {
            src_ptr[count] = fgets(char_buf + len, RS_MAX_KERNEL_SRC - len, fid);
            if (src_ptr[count] != NULL) {
                // +1 preserves each line's NUL so src_ptr[] entries stay
                // separate C strings within the shared buffer.
                len += strlen(src_ptr[count]) + 1;
                count++;
            }
        }
        fclose(fid);
        filename = va_arg(files, char *);
    }
    va_end(files);
    // Warn when the buffer is more than 80% full.
    if (len >= RS_MAX_KERNEL_SRC * 8 / 10) {
        rsprint("\e[31mWARNING. Kernel source size = %s / %s (%.2f > 80%%)\e[0m\n",
                commaint(len), commaint(RS_MAX_KERNEL_SRC), (float)len / RS_MAX_KERNEL_SRC * 100.0f);
    }
    if (len >= RS_MAX_KERNEL_SRC || count >= RS_MAX_KERNEL_LINES) {
        rsprint("ERROR: Kernel source exceeds buffer size constraints. (len = %s / %s, count = %s / %s)\n",
                commaint(len), commaint(RS_MAX_KERNEL_SRC), commaint(count), commaint(RS_MAX_KERNEL_LINES));
        return 0;
    }
#ifdef DEBUG_KERNEL_READ
    printf("%d lines\n", count);
    for (int i = 0; i < count; i++) {
        printf("%d:%s", i, src_ptr[i]);
    }
#endif
    return count;
}
//
// Build the multi-pass parallel-reduction plan for `count` entries, given the
// device limits on the number of groups and work items per group. Pass i
// reduces entry_counts[i] values using group_counts[i] groups of
// work_item_counts[i] items, each item folding a pair of entries.
//
// Returns a heap-allocated plan to be released with free_reduction_params(),
// or NULL when any allocation fails.
//
ReductionParams *make_reduction_params(cl_uint count, cl_uint user_max_groups, cl_uint user_max_work_items) {
    ReductionParams *params = (ReductionParams *)malloc(sizeof(ReductionParams));
    if (params == NULL) {
        rsprint("ERROR: Unable to allocate memory for ReductionParams.");
        return NULL;
    }
    // Copy these for housekeeping
    params->count = count;
    params->user_max_groups = user_max_groups;
    params->user_max_work_items = user_max_work_items;
    // Work items is only count / 2 for small counts
    int work_items = count > 2 * user_max_work_items ? user_max_work_items : count / 2;
    if (work_items == 0) {
        // BUGFIX: guard degenerate inputs (count < 2) against the division by
        // (work_items * 2) below.
        work_items = 1;
    }
    // Number of groups of item-pairs
    int groups = count / (work_items * 2);
    if (groups > user_max_groups) {
        groups = user_max_groups;
    }
    cl_uint levels = 1;
    cl_uint numels = groups;
    // First pass to figure out how many levels
    while (numels > 1) {
        int work_items = (numels > user_max_work_items * 2) ? user_max_work_items : numels / 2;
        numels = numels / (work_items * 2);
        levels++;
    }
    params->pass_counts = levels;
    params->entry_counts = (cl_uint *)malloc(levels * sizeof(cl_uint));
    params->group_counts = (cl_uint *)malloc(levels * sizeof(cl_uint));
    params->work_item_counts = (cl_uint *)malloc(levels * sizeof(cl_uint));
    // BUGFIX: the per-pass tables were previously used without checking the
    // allocations; bail out cleanly instead (free(NULL) is a no-op).
    if (params->entry_counts == NULL || params->group_counts == NULL || params->work_item_counts == NULL) {
        rsprint("ERROR: Unable to allocate memory for reduction pass tables.");
        free(params->entry_counts);
        free(params->group_counts);
        free(params->work_item_counts);
        free(params);
        return NULL;
    }
    // Second pass: fill in the per-level counts, mirroring the loop above
    params->entry_counts[0] = count;
    params->group_counts[0] = groups;
    params->work_item_counts[0] = work_items;
    int level = 1;
    numels = groups;
    while (numels > 1) {
        int work_items = (numels > user_max_work_items * 2) ? user_max_work_items : numels / 2;
        int groups = numels / (work_items * 2);
        if (groups > user_max_groups) {
            groups = user_max_groups;
        }
        params->entry_counts[level] = numels;
        params->group_counts[level] = groups;
        params->work_item_counts[level] = work_items;
        numels = numels / (work_items * 2);
        level++;
    }
    return params;
}
//
// Release a reduction plan created by make_reduction_params().
// Safe to call with NULL (no-op), mirroring free() semantics -- the factory
// can return NULL on allocation failure.
//
void free_reduction_params(ReductionParams *params) {
    if (params == NULL) {
        return;
    }
    free(params->entry_counts);
    free(params->group_counts);
    free(params->work_item_counts);
    free(params);
}
float read_table(const float *table, const float index_last, const float index) {
float floor_index = floorf(index);
float alpha = index - floor_index;
if (index <= 0.0f) {
// printf("%.2f / %.2f --> i = %u X0\n", index, index_last, 0);
return table[0];
} else if (floor_index >= index_last) {
// printf("%.2f / %.2f --> i = %u XM\n", index, index_last, (unsigned int)index_last);
return table[(unsigned int)index_last];
}
unsigned int i = (unsigned int)floor_index;
// printf("%.2f / %.2f --> i = %d, %d / %.2f, %.2f alpha = %.2f v = %.3f\n", index, index_last, i, i+1,
// table[i], table[i + 1],
// alpha,
// table[i] + alpha * (table[i + 1] - table[i]);
return table[i] + alpha * (table[i + 1] - table[i]);
}
//
// Differential reflectivity (dB) of a 4-component scatterer signal:
// 10 log10 of the (s0, s1) power over the (s2, s3) power. Presumably the two
// pairs are H- and V-channel I/Q -- TODO confirm against the kernel source.
// The tiny additive constant keeps the denominator non-zero; it is a double
// literal, so the division is intentionally carried out in double precision.
//
float zdr(cl_float4 x) {
    const float h_power = x.s0 * x.s0 + x.s1 * x.s1;
    const double v_power = x.s2 * x.s2 + x.s3 * x.s3 + 1.0e-20;
    return 10.0f * log10f((float)(h_power / v_power));
}
#pragma mark -
#pragma mark Private Functions
//
// Initialize one compute worker: query the device, create an OpenCL (or GCL)
// context -- optionally shared with an OpenGL sharegroup -- compile the kernel
// source, instantiate every kernel, and create the command queue.
// Exits the process on unrecoverable CL errors.
//
// C          - worker to initialize (C->name must already be set by the caller)
// dev        - OpenCL device id assigned to this worker
// src_size   - number of source strings in src_ptr (unused in the GCL path)
// src_ptr    - kernel source lines (unused in the GCL path)
// sharegroup - non-zero to share the context with an OpenGL context (Apple only)
// verb       - verbosity level
//
void RS_worker_init(RSWorker *C, cl_device_id dev, cl_uint src_size, const char **src_ptr, cl_context_properties sharegroup, const char verb) {
    C->dev = dev;
    C->verb = verb;
    C->mem_usage = 0;
    // Wrapped in CL_CHECK for consistency with the mem-size query below
    CL_CHECK(clGetDeviceInfo(C->dev, CL_DEVICE_MAX_COMPUTE_UNITS, sizeof(cl_uint), &C->num_cus, NULL));
    CL_CHECK(clGetDeviceInfo(C->dev, CL_DEVICE_GLOBAL_MEM_SIZE, sizeof(C->mem_size), &C->mem_size, NULL));
#if defined (_USE_GCL_)
    // A queue & semaphore for the CL work
    rsprint("== GCL ===");
    rsprint("RS_worker_init - gcl_create_dispatch_queue() ...");
    C->que = gcl_create_dispatch_queue(CL_DEVICE_TYPE_USE_ID, C->dev);
    // C->que = gcl_create_dispatch_queue(CL_DEVICE_TYPE_GPU, NULL);
    // cl_device_id gpu_device = gcl_get_device_id_with_dispatch_queue(C->que);
    // rsprint("Asking for CL_DEVICE_TYPE_GPU gives us: %p vs %p", gpu_device, dev);
    C->sem = dispatch_semaphore_create(0);
    C->sem_upload = dispatch_semaphore_create(0);
    if (C->sem == NULL || C->sem_upload == NULL) {
        rsprint("Error. Unable to create semaphore for CL workers.\n");
        return;
    }
    // Set all the surfaces to null so a later free only touches what was created
    //C->surf_range_weight = NULL;
    //C->surf_angular_weight = NULL;
    //C->surf_angular_weight_2d = NULL;
    C->surf_rcs_ellipsoids = NULL;
    int i;
    for (i = 0; i < RS_MAX_ADM_TABLES; i++) {
        C->surf_adm_cd[i] = NULL;
        C->surf_adm_cm[i] = NULL;
    }
    for (i = 0; i < RS_MAX_RCS_TABLES; i++) {
        C->surf_rcs_real[i] = NULL;
        C->surf_rcs_imag[i] = NULL;
    }
    C->surf_uvwt[0] = NULL;
    C->surf_uvwt[1] = NULL;
    C->surf_cpxx[0] = NULL;
    C->surf_cpxx[1] = NULL;
#else
    cl_int ret;
    if (sharegroup) {
#if defined (__APPLE__)
        cl_context_properties prop[] = {
            CL_CONTEXT_PROPERTY_USE_CGL_SHAREGROUP_APPLE, (cl_context_properties)sharegroup,
            0
        };
#else
        cl_context_properties prop[] = {
            0
        };
        rsprint("ERROR: I do not know how to share GL & CL on this platform.");
#endif
        // Create a context from a CGL share group
        C->context = clCreateContext(prop, 1, &C->dev, &pfn_notify, NULL, &ret);
        C->sharegroup = sharegroup;
    } else {
        // Create an independent OpenCL context
        C->context = clCreateContext(NULL, 1, &C->dev, &pfn_notify, NULL, &ret);
    }
    if (ret != CL_SUCCESS) {
        fprintf(stderr, "%s : RS : Error creating OpenCL context. ret = %d\n", now(), ret);
        exit(EXIT_FAILURE);
    } else if (verb > 1) {
        rsprint("OpenCL context[%d] created (context @ %p, device_id @ %p).\n", (int)C->name, C->context, dev);
    }
    // Program
    C->prog = clCreateProgramWithSource(C->context, src_size, (const char **)src_ptr, NULL, &ret);
    if (ret != CL_SUCCESS) {
        fprintf(stderr, "%s : RS : ERROR: Unable to create OpenCL program. ret = %d\n", now(), ret);
        clReleaseContext(C->context);
        exit(EXIT_FAILURE);
    }
    if (verb) {
        rsprint("clBuildProgram() ... worker[%d]", (int)C->name);
        ret = clBuildProgram(C->prog, 1, &C->dev, "", &pfn_prog_notify, NULL);
    } else {
        ret = clBuildProgram(C->prog, 1, &C->dev, "", NULL, NULL);
    }
    if (ret != CL_SUCCESS) {
        // Dump the build log before bailing out
        char char_buf[RS_MAX_STR] = "";
        clGetProgramBuildInfo(C->prog, C->dev, CL_PROGRAM_BUILD_LOG, RS_MAX_STR, char_buf, NULL);
        fprintf(stderr, "%s : RS : ERROR: CL Compilation failed:\n%s", now(), char_buf);
        clReleaseProgram(C->prog);
        clReleaseContext(C->context);
        exit(EXIT_FAILURE);
    } else if (verb > 1) {
        rsprint("OpenCL program[%d] created (program @ %p).\n", (int)C->name, C->prog);
    }
    // Tie all kernels to the program
    C->kern_io = clCreateKernel(C->prog, "io", &ret); CHECK_CL_CREATE_KERNEL
    C->kern_dummy = clCreateKernel(C->prog, "dummy", &ret); CHECK_CL_CREATE_KERNEL
    C->kern_db_rcs = clCreateKernel(C->prog, "db_rcs", &ret); CHECK_CL_CREATE_KERNEL
    C->kern_bg_atts = clCreateKernel(C->prog, "bg_atts", &ret); CHECK_CL_CREATE_KERNEL
    C->kern_fp_atts = clCreateKernel(C->prog, "fp_atts", &ret); CHECK_CL_CREATE_KERNEL
    C->kern_el_atts = clCreateKernel(C->prog, "el_atts", &ret); CHECK_CL_CREATE_KERNEL
    C->kern_db_atts = clCreateKernel(C->prog, "db_atts", &ret); CHECK_CL_CREATE_KERNEL
    C->kern_scat_clr = clCreateKernel(C->prog, "scat_clr", &ret); CHECK_CL_CREATE_KERNEL
    C->kern_scat_sig_aux = clCreateKernel(C->prog, "scat_sig_aux", &ret); CHECK_CL_CREATE_KERNEL
    C->kern_make_pulse_pass_1 = clCreateKernel(C->prog, "make_pulse_pass_1", &ret); CHECK_CL_CREATE_KERNEL
    C->kern_make_pulse_pass_2_group = clCreateKernel(C->prog, "make_pulse_pass_2_group", &ret); CHECK_CL_CREATE_KERNEL
    // BUGFIX: the _local and _range handles were cross-wired (each was created
    // from the other kernel's name), so the RS_CL_PASS_2_IN_LOCAL /
    // RS_CL_PASS_2_IN_RANGE selection in RS_worker_malloc() -- and the verbose
    // work-group-size prints below -- referenced the wrong implementation.
    C->kern_make_pulse_pass_2_local = clCreateKernel(C->prog, "make_pulse_pass_2_local", &ret); CHECK_CL_CREATE_KERNEL
    C->kern_make_pulse_pass_2_range = clCreateKernel(C->prog, "make_pulse_pass_2_range", &ret); CHECK_CL_CREATE_KERNEL
    // Default pass-2 method; may be overridden in RS_worker_malloc()
    C->kern_make_pulse_pass_2 = C->kern_make_pulse_pass_2_group;
    if (verb > 1) {
        rsprint("Kernels for program[%d] created.\n", (int)C->name);
        if (verb > 2) {
            size_t pref_size;
            CL_CHECK(clGetKernelWorkGroupInfo(C->kern_db_atts, C->dev, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, sizeof(pref_size), &pref_size, NULL));
            rsprint("KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE = %ld db_atts()\n", pref_size);
            CL_CHECK(clGetKernelWorkGroupInfo(C->kern_el_atts, C->dev, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, sizeof(pref_size), &pref_size, NULL));
            rsprint("KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE = %ld el_atts()\n", pref_size);
            CL_CHECK(clGetKernelWorkGroupInfo(C->kern_make_pulse_pass_1, C->dev, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, sizeof(pref_size), &pref_size, NULL));
            rsprint("KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE = %ld make_pulse_pass_1()\n", pref_size);
            CL_CHECK(clGetKernelWorkGroupInfo(C->kern_make_pulse_pass_2_group, C->dev, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, sizeof(pref_size), &pref_size, NULL));
            rsprint("KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE = %ld make_pulse_pass_2_group()\n", pref_size);
            CL_CHECK(clGetKernelWorkGroupInfo(C->kern_make_pulse_pass_2_local, C->dev, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, sizeof(pref_size), &pref_size, NULL));
            rsprint("KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE = %ld make_pulse_pass_2_local()\n", pref_size);
            CL_CHECK(clGetKernelWorkGroupInfo(C->kern_make_pulse_pass_2_range, C->dev, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, sizeof(pref_size), &pref_size, NULL));
            rsprint("KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE = %ld make_pulse_pass_2_range()\n", pref_size);
        }
    }
    // A queue for the CL work of each device
    C->que = clCreateCommandQueue(C->context, C->dev, 0, &ret);
    if (ret != CL_SUCCESS) {
        rsprint("Creating command queue[%d] failed (ret = %d).\n", (int)C->name, ret);
    } else if (verb > 1) {
        rsprint("Command queue for context[%d] created.\n", (int)C->name);
    }
#endif
}
// Release all CL resources owned by a worker: command queue, kernels, program,
// context, and every buffer/image created during initialization/allocation.
// NOTE(review): in the GCL path, C->sem_upload (created in RS_worker_init) is
// not released here -- confirm whether that is intentional.
void RS_worker_free(RSWorker *C) {
#if defined (_USE_GCL_)
// GCD/GCL path: release the dispatch objects, then GCL memory and images
dispatch_release(C->sem);
dispatch_release(C->que);
gcl_free(C->range_weight);
gcl_free(C->angular_weight);
gcl_free(C->angular_weight_2d);
gcl_release_image(C->rcs_ellipsoid);
// Air-drag-model tables: drag (cd) and momentum (cm) image per table
for (int a = 0; a < C->adm_count; a++) {
gcl_release_image(C->adm_cd[a]);
gcl_release_image(C->adm_cm[a]);
}
// Radar-cross-section tables: real and imaginary image per table
for (int r = 0; r < C->rcs_count; r++) {
gcl_release_image(C->rcs_real[r]);
gcl_release_image(C->rcs_imag[r]);
}
// Double-buffered background wind (les_uvwt) and debris flux field (dff_icdf) tables
gcl_release_image(C->les_uvwt[0]);
gcl_release_image(C->les_uvwt[1]);
gcl_release_image(C->dff_icdf[0]);
gcl_release_image(C->dff_icdf[1]);
#else
// OpenCL path: release in dependency order -- queue and kernels first, then
// the program and context, and finally the memory objects
clReleaseCommandQueue(C->que);
clReleaseKernel(C->kern_io);
clReleaseKernel(C->kern_dummy);
clReleaseKernel(C->kern_db_rcs);
clReleaseKernel(C->kern_bg_atts);
clReleaseKernel(C->kern_fp_atts);
clReleaseKernel(C->kern_el_atts);
clReleaseKernel(C->kern_db_atts);
clReleaseKernel(C->kern_scat_clr);
clReleaseKernel(C->kern_scat_sig_aux);
clReleaseKernel(C->kern_make_pulse_pass_1);
clReleaseKernel(C->kern_make_pulse_pass_2_group);
clReleaseKernel(C->kern_make_pulse_pass_2_local);
clReleaseKernel(C->kern_make_pulse_pass_2_range);
// kern_make_pulse_pass_2 is an alias of one of the three kernels above, so
// it is not released separately
clReleaseProgram(C->prog);
clReleaseContext(C->context);
clReleaseMemObject(C->range_weight);
clReleaseMemObject(C->angular_weight);
clReleaseMemObject(C->angular_weight_2d);
clReleaseMemObject(C->rcs_ellipsoid);
// Air-drag-model tables: drag (cd) and momentum (cm) object per table
for (int a = 0; a < C->adm_count; a++) {
clReleaseMemObject(C->adm_cd[a]);
clReleaseMemObject(C->adm_cm[a]);
}
// Radar-cross-section tables: real and imaginary object per table
for (int r = 0; r < C->rcs_count; r++) {
clReleaseMemObject(C->rcs_real[r]);
clReleaseMemObject(C->rcs_imag[r]);
}
// Double-buffered background wind (les_uvwt) and debris flux field (dff_icdf) tables
clReleaseMemObject(C->les_uvwt[0]);
clReleaseMemObject(C->les_uvwt[1]);
clReleaseMemObject(C->dff_icdf[0]);
clReleaseMemObject(C->dff_icdf[1]);
#endif
}
//
// Allocate all per-worker GPU buffers, zero-initialize the ones the kernels
// accumulate into, derive the multi-pass pulse-synthesis plan, and bind every
// kernel argument for this worker. Must be called after the population has
// been defined (RSStatusPopulationDefined) and the worker initialized.
//
void RS_worker_malloc(RSHandle *H, const int worker_id) {
    // BUGFIX: &H->workers[worker_id] can never be NULL, so the previous NULL
    // test was dead code; validate the index instead.
    if (worker_id < 0 || worker_id >= RS_MAX_GPU_DEVICE) {
        rsprint("Worker[%d] has not been initialized?\n", worker_id);
        return;
    }
    RSWorker *C = &H->workers[worker_id];
    if ((H->status & RSStatusPopulationDefined) == 0) {
        rsprint("ERROR: Population has not been defined.\n");
        return;
    }
    // Derive the necessary parameters from host to compute workers
    if (C->num_scats != H->num_scats / MAX(1, H->num_workers)) {
        rsprint("ERROR: Inconsistent number of scatterers.\n");
        return;
    }
    size_t group_size_multiple = RS_CL_GROUP_ITEMS;
#if !defined (_USE_GCL_)
    clGetKernelWorkGroupInfo(C->kern_dummy, C->dev, CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, sizeof(group_size_multiple), &group_size_multiple, NULL);
#endif
    if (group_size_multiple > RS_CL_GROUP_ITEMS) {
        // BUGFIX: a stray now() argument was passed without a matching
        // conversion specifier, shifting both %d values (undefined behavior).
        rsprint("ERROR: Potential memory leak. work_items(%d) > RS_CL_GROUP_ITEMS(%d).\n", (int)group_size_multiple, RS_CL_GROUP_ITEMS);
        exit(EXIT_FAILURE);
    }
    size_t max_work_group_size;
    clGetDeviceInfo(C->dev, CL_DEVICE_MAX_WORK_GROUP_SIZE, sizeof(max_work_group_size), &max_work_group_size, NULL);
    cl_ulong local_mem_size;
    clGetDeviceInfo(C->dev, CL_DEVICE_LOCAL_MEM_SIZE, sizeof(cl_ulong), &local_mem_size, NULL);
    if (H->verb > 2) {
        rsprint("CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE = %zu", group_size_multiple);
        // BUGFIX: was cast to int while formatted with %zu (argument size mismatch)
        rsprint("CL_DEVICE_MAX_WORK_GROUP_SIZE = %zu", max_work_group_size);
        rsprint("CL_DEVICE_LOCAL_MEM_SIZE = %zu", (size_t)local_mem_size);
    }
    // Derive the multi-pass pulse synthesis plan from the device limits
    C->make_pulse_params = RS_make_pulse_params((cl_uint)C->num_scats,
                                                (cl_uint)group_size_multiple,
                                                (cl_uint)max_work_group_size,
                                                (cl_uint)local_mem_size,
                                                H->params.range_start,
                                                H->params.range_delta,
                                                H->params.range_count);
    // Pass-1 scratch: one cl_float4 per work item per range gate
    const unsigned long work_numel = C->make_pulse_params.global[0] * C->make_pulse_params.local[0] * H->params.range_count;
#if defined (_USE_GCL_)
    // printf("Creating cl_mem from vbo ... %d %d %d \n", C->vbo_scat_pos, C->vbo_scat_clr, C->vbo_scat_ori);
    C->scat_pos = gcl_gl_create_ptr_from_buffer(C->vbo_scat_pos);
    if (C->scat_pos == NULL) {
        rsprint("ERROR: gcl_gl_create_ptr_from_buffer() failed for scat_pos.\n");
        C->scat_pos = gcl_malloc(C->num_scats * sizeof(cl_float4), NULL, 0);
    }
    C->scat_clr = gcl_gl_create_ptr_from_buffer(C->vbo_scat_clr);
    if (C->scat_clr == NULL) {
        rsprint("ERROR: gcl_gl_create_ptr_from_buffer() failed for scat_clr.\n");
        C->scat_clr = gcl_malloc(C->num_scats * sizeof(cl_float4), NULL, 0);
    }
    C->scat_ori = gcl_gl_create_ptr_from_buffer(C->vbo_scat_ori);
    if (C->scat_ori == NULL) {
        rsprint("ERROR: gcl_gl_create_ptr_from_buffer() failed for scat_ori.\n");
        C->scat_ori = gcl_malloc(C->num_scats * sizeof(cl_float4), NULL, 0);
    }
    C->scat_vel = gcl_malloc(C->num_scats * sizeof(cl_float4), NULL, 0);
    C->scat_tum = gcl_malloc(C->num_scats * sizeof(cl_float4), NULL, 0);
    C->scat_aux = gcl_malloc(C->num_scats * sizeof(cl_float4), NULL, 0);
    C->scat_rcs = gcl_malloc(C->num_scats * sizeof(cl_float4), NULL, 0);
    C->scat_sig = gcl_malloc(C->num_scats * sizeof(cl_float4), NULL, 0);
    C->work = gcl_malloc(work_numel * sizeof(cl_float4), NULL, 0);
    C->pulse = gcl_malloc(H->params.range_count * sizeof(cl_float4), NULL, 0);
    C->scat_rnd = gcl_malloc(C->num_scats * sizeof(cl_int4), NULL, 0);
    // BUGFIX: this used to accumulate into mem_size, which holds the device
    // capacity queried in RS_worker_init(); that corrupted the capacity figure
    // used by the usage warning at the end of this function. Track it in
    // mem_usage like the OpenCL path does.
    C->mem_usage += (8 * C->num_scats + work_numel + H->params.range_count) * sizeof(cl_float4) + C->num_scats * sizeof(cl_uint4);
#else
    cl_int ret;
    //printf("shared_vbo: %d %d %d\n", C->vbo_scat_pos, C->vbo_scat_clr, C->vbo_scat_ori);
    // Round the scatterer count up to a multiple of the preferred group size
    size_t numel = ((C->num_scats + group_size_multiple - 1) / group_size_multiple) * group_size_multiple;
    //printf("numel = %zu num_scats = %zu\n", numel, C->num_scats);
    if (H->has_vbo_from_gl) {
        // Wrap the GL vertex buffers so CL kernels can write scatterer state
        // that the renderer draws directly
        C->scat_pos = clCreateFromGLBuffer(C->context, CL_MEM_READ_WRITE, C->vbo_scat_pos, &ret);
        C->scat_clr = clCreateFromGLBuffer(C->context, CL_MEM_READ_WRITE, C->vbo_scat_clr, &ret);
        C->scat_ori = clCreateFromGLBuffer(C->context, CL_MEM_READ_WRITE, C->vbo_scat_ori, &ret);
        if (C->scat_pos == NULL || C->scat_clr == NULL || C->scat_ori == NULL || ret != CL_SUCCESS) {
            fprintf(stderr, "%s : RS : Error in clCreateFromGLBuffer(). ret = %d\n", now(), ret);
            exit(EXIT_FAILURE);
        }
    } else {
        C->scat_pos = clCreateBuffer(C->context, CL_MEM_READ_WRITE, numel * sizeof(cl_float4), NULL, &ret); CHECK_CL_CREATE_BUFFER
        C->scat_clr = clCreateBuffer(C->context, CL_MEM_READ_WRITE, numel * sizeof(cl_float4), NULL, &ret); CHECK_CL_CREATE_BUFFER
        C->scat_ori = clCreateBuffer(C->context, CL_MEM_READ_WRITE, numel * sizeof(cl_float4), NULL, &ret); CHECK_CL_CREATE_BUFFER
    }
    C->scat_vel = clCreateBuffer(C->context, CL_MEM_READ_WRITE, numel * sizeof(cl_float4), NULL, &ret); CHECK_CL_CREATE_BUFFER
    C->scat_tum = clCreateBuffer(C->context, CL_MEM_READ_WRITE, numel * sizeof(cl_float4), NULL, &ret); CHECK_CL_CREATE_BUFFER
    C->scat_aux = clCreateBuffer(C->context, CL_MEM_READ_WRITE, numel * sizeof(cl_float4), NULL, &ret); CHECK_CL_CREATE_BUFFER
    C->scat_rcs = clCreateBuffer(C->context, CL_MEM_READ_WRITE, numel * sizeof(cl_float4), NULL, &ret); CHECK_CL_CREATE_BUFFER
    C->scat_sig = clCreateBuffer(C->context, CL_MEM_READ_WRITE, numel * sizeof(cl_float4), NULL, &ret); CHECK_CL_CREATE_BUFFER
    C->scat_rnd = clCreateBuffer(C->context, CL_MEM_READ_WRITE, numel * sizeof(cl_int4), NULL, &ret); CHECK_CL_CREATE_BUFFER
    C->work = clCreateBuffer(C->context, CL_MEM_READ_WRITE, work_numel * sizeof(cl_float4), NULL, &ret); CHECK_CL_CREATE_BUFFER
    C->pulse = clCreateBuffer(C->context, CL_MEM_READ_WRITE, H->params.range_count * sizeof(cl_float4), NULL, &ret); CHECK_CL_CREATE_BUFFER
    // Set some components to zero
    cl_float4 *zeros = (cl_float4 *)malloc(numel * sizeof(cl_float4));
    // BUGFIX: guard the host-side staging allocation before memset/upload
    if (zeros == NULL) {
        fprintf(stderr, "%s : RS : Error: Unable to allocate zero-fill buffer.\n", now());
        exit(EXIT_FAILURE);
    }
    memset(zeros, 0, numel * sizeof(cl_float4));
    clEnqueueWriteBuffer(C->que, C->scat_aux, CL_TRUE, 0, numel * sizeof(cl_float4), zeros, 0, NULL, NULL);
    clEnqueueWriteBuffer(C->que, C->scat_rcs, CL_TRUE, 0, numel * sizeof(cl_float4), zeros, 0, NULL, NULL);
    clEnqueueWriteBuffer(C->que, C->scat_sig, CL_TRUE, 0, numel * sizeof(cl_float4), zeros, 0, NULL, NULL);
    free(zeros);
    C->mem_usage += (8 * numel + work_numel + H->params.range_count) * sizeof(cl_float4) + numel * sizeof(cl_uint4);
    //
    // Set up kernel's input / output arguments
    //
    ret = CL_SUCCESS;
    ret |= clSetKernelArg(C->kern_io, 0, sizeof(cl_mem), &C->scat_pos);
    ret |= clSetKernelArg(C->kern_io, 1, sizeof(cl_mem), &C->scat_aux);
    if (ret != CL_SUCCESS) {
        fprintf(stderr, "%s : RS : Error: Failed to set arguments for kernel io().\n", now());
        exit(EXIT_FAILURE);
    }
    ret = CL_SUCCESS;
    ret |= clSetKernelArg(C->kern_dummy, 0, sizeof(cl_mem), &C->scat_pos);
    if (ret != CL_SUCCESS) {
        fprintf(stderr, "%s : RS : Error: Failed to set arguments for kernel dummy().\n", now());
        exit(EXIT_FAILURE);
    }
    ret = CL_SUCCESS;
    ret |= clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentPosition, sizeof(cl_mem), &C->scat_pos);
    ret |= clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentOrientation, sizeof(cl_mem), &C->scat_ori);
    ret |= clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentRadarCrossSection, sizeof(cl_mem), &C->scat_rcs);
    ret |= clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentRadarCrossSectionReal, sizeof(cl_mem), &C->rcs_real[0]);
    ret |= clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentRadarCrossSectionImag, sizeof(cl_mem), &C->rcs_imag[0]);
    ret |= clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentRadarCrossSectionDescription, sizeof(cl_float16), &C->rcs_desc[0]);
    ret |= clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
    if (ret != CL_SUCCESS) {
        fprintf(stderr, "%s : RS : Error: Failed to set arguments for kernel kern_db_rcs().\n", now());
        exit(EXIT_FAILURE);
    }
    ret = CL_SUCCESS;
    ret |= clSetKernelArg(C->kern_bg_atts, RSBackgroundAttributeKernelArgumentPosition, sizeof(cl_mem), &C->scat_pos);
    ret |= clSetKernelArg(C->kern_bg_atts, RSBackgroundAttributeKernelArgumentVelocity, sizeof(cl_mem), &C->scat_vel);
    ret |= clSetKernelArg(C->kern_bg_atts, RSBackgroundAttributeKernelArgumentRadarCrossSection, sizeof(cl_mem), &C->scat_rcs);
    ret |= clSetKernelArg(C->kern_bg_atts, RSBackgroundAttributeKernelArgumentRandomSeed, sizeof(cl_mem), &C->scat_rnd);
    ret |= clSetKernelArg(C->kern_bg_atts, RSBackgroundAttributeKernelArgumentBackgroundVelocity, sizeof(cl_mem), &C->les_uvwt[0]);
    ret |= clSetKernelArg(C->kern_bg_atts, RSBackgroundAttributeKernelArgumentBackgroundCn2Pressure, sizeof(cl_mem), &C->les_cpxx[0]);
    ret |= clSetKernelArg(C->kern_bg_atts, RSBackgroundAttributeKernelArgumentBackgroundDescription, sizeof(cl_float16), &C->les_desc);
    ret |= clSetKernelArg(C->kern_bg_atts, RSBackgroundAttributeKernelArgumentEllipsoidRCS, sizeof(cl_mem), &C->rcs_ellipsoid);
    ret |= clSetKernelArg(C->kern_bg_atts, RSBackgroundAttributeKernelArgumentEllipsoidRCSDescription, sizeof(cl_float4), &C->rcs_ellipsoid_desc);
    ret |= clSetKernelArg(C->kern_bg_atts, RSBackgroundAttributeKernelArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
    if (ret != CL_SUCCESS) {
        fprintf(stderr, "%s : RS : Error: Failed to set arguments for kernel kern_bg_atts().\n", now());
        exit(EXIT_FAILURE);
    }
    ret = CL_SUCCESS;
    ret |= clSetKernelArg(C->kern_fp_atts, RSBackgroundAttributeKernelArgumentPosition, sizeof(cl_mem), &C->scat_pos);
    ret |= clSetKernelArg(C->kern_fp_atts, RSBackgroundAttributeKernelArgumentVelocity, sizeof(cl_mem), &C->scat_vel);
    ret |= clSetKernelArg(C->kern_fp_atts, RSBackgroundAttributeKernelArgumentRadarCrossSection, sizeof(cl_mem), &C->scat_rcs);
    ret |= clSetKernelArg(C->kern_fp_atts, RSBackgroundAttributeKernelArgumentRandomSeed, sizeof(cl_mem), &C->scat_rnd);
    ret |= clSetKernelArg(C->kern_fp_atts, RSBackgroundAttributeKernelArgumentBackgroundVelocity, sizeof(cl_mem), &C->les_uvwt[0]);
    ret |= clSetKernelArg(C->kern_fp_atts, RSBackgroundAttributeKernelArgumentBackgroundCn2Pressure, sizeof(cl_mem), &C->les_cpxx[0]);
    ret |= clSetKernelArg(C->kern_fp_atts, RSBackgroundAttributeKernelArgumentBackgroundDescription, sizeof(cl_float16), &C->les_desc);
    ret |= clSetKernelArg(C->kern_fp_atts, RSBackgroundAttributeKernelArgumentEllipsoidRCS, sizeof(cl_mem), &C->rcs_ellipsoid);
    ret |= clSetKernelArg(C->kern_fp_atts, RSBackgroundAttributeKernelArgumentEllipsoidRCSDescription, sizeof(cl_float4), &C->rcs_ellipsoid_desc);
    ret |= clSetKernelArg(C->kern_fp_atts, RSBackgroundAttributeKernelArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
    if (ret != CL_SUCCESS) {
        fprintf(stderr, "%s : RS : Error: Failed to set arguments for kernel kern_fp_atts().\n", now());
        exit(EXIT_FAILURE);
    }
    ret = CL_SUCCESS;
    ret |= clSetKernelArg(C->kern_el_atts, RSBackgroundAttributeKernelArgumentPosition, sizeof(cl_mem), &C->scat_pos);
    ret |= clSetKernelArg(C->kern_el_atts, RSBackgroundAttributeKernelArgumentVelocity, sizeof(cl_mem), &C->scat_vel);
    ret |= clSetKernelArg(C->kern_el_atts, RSBackgroundAttributeKernelArgumentRadarCrossSection, sizeof(cl_mem), &C->scat_rcs);
    ret |= clSetKernelArg(C->kern_el_atts, RSBackgroundAttributeKernelArgumentRandomSeed, sizeof(cl_mem), &C->scat_rnd);
    ret |= clSetKernelArg(C->kern_el_atts, RSBackgroundAttributeKernelArgumentBackgroundVelocity, sizeof(cl_mem), &C->les_uvwt[0]);
    ret |= clSetKernelArg(C->kern_el_atts, RSBackgroundAttributeKernelArgumentBackgroundCn2Pressure, sizeof(cl_mem), &C->les_cpxx[0]);
    ret |= clSetKernelArg(C->kern_el_atts, RSBackgroundAttributeKernelArgumentBackgroundDescription, sizeof(cl_float16), &C->les_desc);
    ret |= clSetKernelArg(C->kern_el_atts, RSBackgroundAttributeKernelArgumentEllipsoidRCS, sizeof(cl_mem), &C->rcs_ellipsoid);
    ret |= clSetKernelArg(C->kern_el_atts, RSBackgroundAttributeKernelArgumentEllipsoidRCSDescription, sizeof(cl_float4), &C->rcs_ellipsoid_desc);
    ret |= clSetKernelArg(C->kern_el_atts, RSBackgroundAttributeKernelArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
    if (ret != CL_SUCCESS) {
        fprintf(stderr, "%s : RS : Error: Failed to set arguments for kernel kern_el_atts().\n", now());
        exit(EXIT_FAILURE);
    }
    ret = CL_SUCCESS;
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentPosition, sizeof(cl_mem), &C->scat_pos);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentOrientation, sizeof(cl_mem), &C->scat_ori);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentVelocity, sizeof(cl_mem), &C->scat_vel);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentTumble, sizeof(cl_mem), &C->scat_tum);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentRadarCrossSection, sizeof(cl_mem), &C->scat_rcs);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentRandomSeed, sizeof(cl_mem), &C->scat_rnd);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentBackgroundVelocity, sizeof(cl_mem), &C->les_uvwt[0]);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentBackgroundCn2Pressure, sizeof(cl_mem), &C->les_cpxx[0]);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentBackgroundVelocityDescription, sizeof(cl_float16), &C->les_desc);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentAirDragModelDrag, sizeof(cl_mem), &C->adm_cd[0]);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentAirDragModelMomentum, sizeof(cl_mem), &C->adm_cm[0]);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentAirDragModelDescription, sizeof(cl_float16), &C->adm_desc[0]);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentRadarCrossSectionReal, sizeof(cl_mem), &C->rcs_real[0]);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentRadarCrossSectionImag, sizeof(cl_mem), &C->rcs_imag[0]);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentRadarCrossSectionDescription, sizeof(cl_float16), &C->rcs_desc[0]);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentDebrisFluxField, sizeof(cl_mem), &C->dff_icdf[0]);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentDebrisFluxFieldDescription, sizeof(cl_float16), &C->dff_desc);
    ret |= clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
    if (ret != CL_SUCCESS) {
        fprintf(stderr, "%s : RS : Error: Failed to set arguments for kernel kern_db_atts().\n", now());
        exit(EXIT_FAILURE);
    }
    ret = CL_SUCCESS;
    ret |= clSetKernelArg(C->kern_scat_clr, RSScattererColorKernelArgumentColor, sizeof(cl_mem), &C->scat_clr);
    ret |= clSetKernelArg(C->kern_scat_clr, RSScattererColorKernelArgumentPosition, sizeof(cl_mem), &C->scat_pos);
    ret |= clSetKernelArg(C->kern_scat_clr, RSScattererColorKernelArgumentAuxiliary, sizeof(cl_mem), &C->scat_aux);
    ret |= clSetKernelArg(C->kern_scat_clr, RSScattererColorKernelArgumentRadarCrossSection, sizeof(cl_mem), &C->scat_rcs);
    ret |= clSetKernelArg(C->kern_scat_clr, RSScattererColorKernelArgumentDrawMode, sizeof(cl_uint4), &H->draw_mode);
    if (ret != CL_SUCCESS) {
        fprintf(stderr, "%s : RS : Error: Failed to set arguments for kernel kern_scat_clr().\n", now());
        exit(EXIT_FAILURE);
    }
    ret = CL_SUCCESS;
    ret |= clSetKernelArg(C->kern_scat_sig_aux, RSScattererAngularWeightKernalArgumentSignal, sizeof(cl_mem), &C->scat_sig);
    ret |= clSetKernelArg(C->kern_scat_sig_aux, RSScattererAngularWeightKernalArgumentAuxiliary, sizeof(cl_mem), &C->scat_aux);
    ret |= clSetKernelArg(C->kern_scat_sig_aux, RSScattererAngularWeightKernalArgumentPosition, sizeof(cl_mem), &C->scat_pos);
    ret |= clSetKernelArg(C->kern_scat_sig_aux, RSScattererAngularWeightKernalArgumentRadarCrossSection, sizeof(cl_mem), &C->scat_rcs);
    ret |= clSetKernelArg(C->kern_scat_sig_aux, RSScattererAngularWeightKernalArgumentWeightTable, sizeof(cl_mem), &C->angular_weight);
    ret |= clSetKernelArg(C->kern_scat_sig_aux, RSScattererAngularWeightKernalArgumentWeightTableDescription, sizeof(cl_float4), &C->angular_weight_desc);
    ret |= clSetKernelArg(C->kern_scat_sig_aux, RSScattererAngularWeightKernalArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
    if (ret != CL_SUCCESS) {
        fprintf(stderr, "%s : RS : Error: Failed to set arguments for kernel kern_scat_sig_aux().\n", now());
        exit(EXIT_FAILURE);
    }
    if (C->verb > 1) {
        rsprint("Pass 1 global =%7s local = %3zu x %2d = %6s B groups = %4d N = %9s\n",
                commaint(C->make_pulse_params.global[0]),
                C->make_pulse_params.local[0],
                C->make_pulse_params.range_count,
                commaint(C->make_pulse_params.local_mem_size[0]),
                C->make_pulse_params.group_counts[0],
                commaint(C->make_pulse_params.entry_counts[0]));
    }
    ret = CL_SUCCESS;
    ret |= clSetKernelArg(C->kern_make_pulse_pass_1, 0, sizeof(cl_mem), &C->work);
    ret |= clSetKernelArg(C->kern_make_pulse_pass_1, 1, sizeof(cl_mem), &C->scat_sig);
    ret |= clSetKernelArg(C->kern_make_pulse_pass_1, 2, sizeof(cl_mem), &C->scat_aux);
    ret |= clSetKernelArg(C->kern_make_pulse_pass_1, 3, C->make_pulse_params.local_mem_size[0], NULL);
    ret |= clSetKernelArg(C->kern_make_pulse_pass_1, 4, sizeof(cl_mem), &C->range_weight);
    ret |= clSetKernelArg(C->kern_make_pulse_pass_1, 5, sizeof(cl_float4), &C->range_weight_desc);
    ret |= clSetKernelArg(C->kern_make_pulse_pass_1, 6, sizeof(float), &C->make_pulse_params.range_start);
    ret |= clSetKernelArg(C->kern_make_pulse_pass_1, 7, sizeof(float), &C->make_pulse_params.range_delta);
    ret |= clSetKernelArg(C->kern_make_pulse_pass_1, 8, sizeof(unsigned int), &C->make_pulse_params.range_count);
    ret |= clSetKernelArg(C->kern_make_pulse_pass_1, 9, sizeof(unsigned int), &C->make_pulse_params.group_counts[0]);
    ret |= clSetKernelArg(C->kern_make_pulse_pass_1, 10, sizeof(unsigned int), &C->make_pulse_params.entry_counts[0]);
    if (ret != CL_SUCCESS) {
        fprintf(stderr, "%s : RS : Error: Failed to set arguments for kernel make_pulse_pass_1().\n", now());
        exit(EXIT_FAILURE);
    }
    // Select the pass-2 implementation derived by RS_make_pulse_params()
    if (C->make_pulse_params.cl_pass_2_method == RS_CL_PASS_2_IN_LOCAL) {
        C->kern_make_pulse_pass_2 = C->kern_make_pulse_pass_2_local;
    } else if (C->make_pulse_params.cl_pass_2_method == RS_CL_PASS_2_IN_RANGE) {
        C->kern_make_pulse_pass_2 = C->kern_make_pulse_pass_2_range;
    } else {
        C->kern_make_pulse_pass_2 = C->kern_make_pulse_pass_2_group;
    }
    if (C->verb > 1) {
        rsprint("Pass 2 global =%7s local = %3zu x %2lu = %6s B groups = %3d%s N = %9s\n",
                commaint(C->make_pulse_params.global[1]),
                C->make_pulse_params.local[1],
                C->make_pulse_params.local_mem_size[1] / C->make_pulse_params.local[1] / sizeof(cl_float4),
                commaint(C->make_pulse_params.local_mem_size[1]),
                C->make_pulse_params.group_counts[1],
                C->make_pulse_params.cl_pass_2_method == RS_CL_PASS_2_IN_RANGE ? "R" :
                (C->make_pulse_params.cl_pass_2_method == RS_CL_PASS_2_IN_LOCAL ? "L" : "U"),
                commaint(C->make_pulse_params.entry_counts[1]));
    }
    ret = CL_SUCCESS;
    ret |= clSetKernelArg(C->kern_make_pulse_pass_2, 0, sizeof(cl_mem), &C->pulse);
    ret |= clSetKernelArg(C->kern_make_pulse_pass_2, 1, sizeof(cl_mem), &C->work);
    ret |= clSetKernelArg(C->kern_make_pulse_pass_2, 2, C->make_pulse_params.local_mem_size[1], NULL);
    ret |= clSetKernelArg(C->kern_make_pulse_pass_2, 3, sizeof(unsigned int), &C->make_pulse_params.range_count);
    ret |= clSetKernelArg(C->kern_make_pulse_pass_2, 4, sizeof(unsigned int), &C->make_pulse_params.entry_counts[1]);
    if (ret != CL_SUCCESS) {
        fprintf(stderr, "%s : RS : Error: Failed to set arguments for kernel make_pulse_pass_2().\n", now());
        exit(EXIT_FAILURE);
    }
#endif
    // Warn when usage exceeds 3/4 of the device's global memory
    if (C->mem_usage > C->mem_size / 4 * 3) {
        // NOTE(review): if commafloat() returns a static buffer, the two calls
        // in one statement alias each other -- confirm it rotates buffers.
        rsprint("WARNING: High GPU memory usage by workers[%d]: %s GB out of %s GB.", C->name, commafloat((float)C->mem_usage * 1.0e-9f), commafloat((float)C->mem_size * 1.0e-9f));
    }
    if (C->verb) {
        rsprint("workers[%d] memory usage = %s B\n", C->name, commaint(C->mem_usage));
    }
}
//
// Refresh the derived radar parameters from the primary ones (prt, lambda,
// antenna_bw_deg, c, tau).
//
void RS_update_computed_properties(RSHandle *H) {
    // Pulse repetition frequency (Hz) from the pulse repetition time (s)
    H->params.prf = 1.0f / H->params.prt;
    // Aliasing (Nyquist) velocity: va = lambda * PRF / 4
    H->params.va = 0.25f * H->params.lambda * H->params.prf;
    // NOTE(review): if fn is meant to be the Nyquist frequency, it should be
    // 0.5f * prf; 0.5f / prf equals prt / 2, which is a time. Kept as-is
    // pending confirmation. (Was a bare 0.5 double literal; made consistent
    // with the single-precision constants used throughout.)
    H->params.fn = 0.5f / H->params.prf;
    // Beamwidth in radians
    H->params.antenna_bw_rad = H->params.antenna_bw_deg / 180.0f * M_PI;
    // Range gate spacing: dr = c * tau / 2
    H->params.dr = 0.5f * H->params.c * H->params.tau;
}
#pragma mark -
#pragma mark RS Convenient functions
//
// Convenience query: the number of GPU-class OpenCL devices available.
// The per-device details gathered by get_device_info() are discarded.
//
cl_uint RS_gpu_count(void) {
    cl_uint device_count = 0;
    cl_device_id device_list[RS_MAX_GPU_DEVICE];
    cl_uint compute_units[RS_MAX_GPU_DEVICE];
    cl_uint vendor_ids[RS_MAX_GPU_DEVICE];
    get_device_info(CL_DEVICE_TYPE_GPU, &device_count, device_list, compute_units, vendor_ids, 0);
    return device_count;
}
//
// Return the framework version as a static string. Not thread-safe (the
// buffer is shared across calls).
//
char *RS_version_string(void) {
    static char string[16];
    // BUGFIX: use snprintf so a long RS_VERSION_STRING cannot overflow the
    // fixed 16-byte buffer.
    snprintf(string, sizeof(string), "%s", RS_VERSION_STRING);
    return string;
}
//
// Copy src into dst, prefixing every line with `width` spaces. Returns the
// number of characters written (excluding the terminating NUL).
// CAUTION: src is modified in place -- each '\n' is temporarily replaced by a
// NUL while the line is formatted, and the terminators are not restored.
// With width == 0 the string is copied verbatim.
//
int RS_indent_copy(char *dst, char *src, const int width) {
    if (width == 0) {
        // No indentation requested: plain copy
        return sprintf(dst, "%s", src);
    }
    // Build the indentation prefix once
    char pad[width + 1];
    memset(pad, ' ', width);
    pad[width] = '\0';
    int written = 0;
    char *line = src;
    char *newline;
    // Emit every complete (newline-terminated) line with the prefix
    while ((newline = strchr(line, '\n')) != NULL) {
        *newline = '\0';
        written += sprintf(dst + written, "%s%s\n", pad, line);
        line = newline + 1;
    }
    // Emit the final (unterminated) remainder
    written += sprintf(dst + written, "%s%s", pad, line);
    return written;
}
#pragma mark -
#pragma mark RS Initialization and Deallocation
//
// Allocates and initializes an RSHandle -- the master state of the framework.
//
// bundle_path - directory containing the OpenCL kernel sources ("." = cwd)
// method      - RS_METHOD_GPU or RS_METHOD_CPU
// gpu_mask    - bitmask selecting GPU devices (bit k selects device k)
// sharegroup  - OpenGL sharegroup for CL/GL interop; 0 for none
// verb        - verbosity level
//
// Returns NULL on failure.
//
RSHandle *RS_init_with_path(const char *bundle_path, RSMethod method, const uint8_t gpu_mask, cl_context_properties sharegroup, const char verb) {
    int i, k;
    RSHandle *H;
    // Allocate the handle aligned to RS_ALIGN_SIZE
    if (posix_memalign((void **)&H, RS_ALIGN_SIZE, sizeof(RSHandle))) {
        rsprint("ERROR: Unable to initialize RS Framework.");
        return NULL;
    }
    memset(H, 0, sizeof(RSHandle));
    // Default non-zero parameters
    H->sim_tic = 0.0f;
    H->status = RSStatusNull;
    H->params.c = 3.0e8f;                       // speed of light (m/s)
    H->params.tau = 0.2e-6f;                    // pulse width (s)
    H->params.body_per_cell = RS_BODY_PER_CELL;
    H->params.domain_pad_factor = RS_DOMAIN_PAD;
    H->num_workers = 1;
    H->num_types = 1;
    H->method = method;
    // NOTE(review): the original seed constant was lost to a redaction
    // artifact ("<PASSWORD>", which does not compile). Any fixed constant
    // keeps runs reproducible -- confirm the desired default value.
    H->random_seed = 1;
    for (i = 0; i < RS_MAX_GPU_DEVICE; i++) {
        H->workers[i].name = i;
    }
    for (i = 0; i < RS_MAX_DEBRIS_TYPES; i++) {
        H->counts[i] = 0;
    }
    if (H->method == RS_METHOD_GPU) {
        if (verb) {
            rsprint("Getting CL devices ...");
        }
        // Get and show some device info
        get_device_info(CL_DEVICE_TYPE_GPU, &H->num_devs, H->devs, H->num_cus, H->vendors, verb);
    } else if (H->method == RS_METHOD_CPU) {
        // Run this to get the num_cus to the same values.
        get_device_info(CL_DEVICE_TYPE_CPU, &H->num_devs, H->devs, H->num_cus, H->vendors, 0);
    }
    if (H->num_devs == 0 || H->num_cus[0] == 0) {
        rsprint("ERROR: No OpenCL devices found.");
        free(H);    // BUGFIX: do not leak the handle on this error path
        return NULL;
    }
    H->num_workers = MIN(__builtin_popcount(gpu_mask), H->num_devs);
    rsprint("gpu_mask = 0x%x count = %d num_workers = %d\n", gpu_mask, __builtin_popcount(gpu_mask), H->num_workers);
    // Pick a work-group size multiple suited to the vendor's SIMD width
    switch (H->vendors[0]) {
        case RS_GPU_VENDOR_AMD:
        case RS_GPU_VENDOR_INTEL:
            H->preferred_multiple = H->num_cus[0] * 16;
            break;
        case RS_GPU_VENDOR_NVIDIA:
            H->preferred_multiple = H->num_cus[0] * 64;
            break;
        default:
            H->preferred_multiple = H->num_cus[0] * 256;
            break;
    }
#if defined (GUI)
    // Force to one GPU at the moment. Seems like OpenGL context can be shared with only one OpenCL context
    H->num_workers = 1;
#endif
#if defined (_USE_GCL_)
    H->num_workers = 1;
    for (i = 0; i < H->num_workers; i++) {
        if (verb > 2) {
            rsprint("Initializing worker %d using %p\n", i, H->devs[i]);
        }
        RS_worker_init(&H->workers[i], H->devs[i], 0, NULL, sharegroup, verb);
    }
#else
    cl_uint count;
    char *src_ptr[RS_MAX_KERNEL_LINES];
    // Kernel source
    if (!strcmp(bundle_path, ".")) {
        count = read_kernel_source_from_files(src_ptr, "rs_enum.h", "rs.cl", NULL);
    } else {
        char enum_h_path[RS_MAX_STR];
        snprintf(enum_h_path, RS_MAX_STR, "%s/rs_enum.h", bundle_path);
#ifdef INCLUDE_TYPES_IN_KERNEL
        // This version combines special types along with the kernel functions
        char types_h_path[RS_MAX_STR];
        char kern_src_path[RS_MAX_STR];
        snprintf(types_h_path, RS_MAX_STR, "%s/rs_types.h", bundle_path);
        snprintf(kern_src_path, RS_MAX_STR, "%s/rs.cl", bundle_path);
        count = read_kernel_source_from_files(src_ptr, enum_h_path, types_h_path, kern_src_path, NULL);
#else
        // This version does not depend on custom types
        char kern_src_path[RS_MAX_STR];
        snprintf(kern_src_path, RS_MAX_STR, "%s/rs.cl", bundle_path);
        count = read_kernel_source_from_files(src_ptr, enum_h_path, kern_src_path, NULL);
#endif
    }
    if (count == 0) {
        rsprint("Empty kernel source.");
        free(H);    // BUGFIX: do not leak the handle on this error path
        return NULL;
    }
    i = 0;
    for (k = 0; k < H->num_devs; k++) {
        if ((gpu_mask & (1 << k)) == 0) {
            continue;
        }
        if (verb > 1) {
            rsprint("Initializing worker %d using device %d @ %p\n", i, k, H->devs[k]);
        }
        // BUGFIX: worker i must run on the mask-selected device k. The
        // previous code passed H->devs[i], which broke whenever the mask
        // skipped a device (i != k).
        RS_worker_init(&H->workers[i], H->devs[k], count, (const char **)src_ptr, sharegroup, verb);
        i++;
    }
#endif
    // Temporary supress the verbose output for setting default values; or very verbosy for heavy debug version
#if defined(DEBUG_HEAVY)
    H->verb = 3;
#else
    H->verb = verb > 1 ? verb : 0;
#endif
    // Set up some basic parameters to default values, H->verb is still 0 so no API message output
    RS_set_prt(H, RS_PARAMS_PRT);
    RS_set_lambda(H, RS_PARAMS_LAMBDA);
    RS_set_antenna_params(H, RS_PARAMS_BEAMWIDTH, 50.0f);
    RS_set_tx_params(H, RS_PARAMS_TAU, 50.0e3f);
    RS_set_beam_pos(H, 5.0f, 1.0f);
    RS_set_sampling_spacing(H, RS_PARAMS_GATEWIDTH, RS_PARAMS_BEAMWIDTH, RS_PARAMS_BEAMWIDTH);
    H->verb = verb;
    return H;
}
// Convenience initializer: GPU method on the devices selected by gpu_mask,
// kernel sources from the current working directory, no GL sharegroup.
RSHandle *RS_init_for_selected_gpu(const uint8_t gpu_mask, const char verb) {
    return RS_init_with_path(".", RS_METHOD_GPU, gpu_mask, 0, verb);
}
// Convenience initializer: CPU method with the given verbosity.
// NOTE(review): gpu_mask is passed as 0 here -- presumably ignored for the
// CPU method; confirm against RS_init_with_path().
RSHandle *RS_init_for_cpu_verbose(const char verb) {
    return RS_init_with_path(".", RS_METHOD_CPU, 0, 0, verb);
}
// Convenience initializer: GPU method on all devices (mask 0xFF) with the
// given verbosity.
RSHandle *RS_init_verbose(const char verb) {
    return RS_init_with_path(".", RS_METHOD_GPU, 0xFF, 0, verb);
}
//
// Default initializer: GPU method, all devices (mask 0xFF), quiet.
// ((void) added: an empty parameter list in a C definition is the obsolete
// unspecified-arguments form.)
//
RSHandle *RS_init(void) {
    return RS_init_with_path(".", RS_METHOD_GPU, 0xFF, 0, 0);
}
//
// Releases the per-worker device buffers and the host-side scatterer arrays.
// NOTE(review): freed pointers are not set to NULL; the handle's scatterer
// buffers must not be used after this call.
//
void RS_free_scat_memory(RSHandle *H) {
    int i;
    if (H->verb > 2) {
        rsprint("Freeing GPU memories ...");
    }
#if defined (_USE_GCL_)
    // GCD-flavored OpenCL build: buffers were allocated against shared VBOs,
    // so release them with gcl_free().
    for (i = 0; i < H->num_workers; i++) {
        if (H->workers[i].vbo_scat_pos == 0) {
            rsprint("ERROR: Unexpected conditions. VBOs were not shared.");
            return;
        }
        gcl_free(H->workers[i].scat_pos);
        gcl_free(H->workers[i].scat_clr); // Only the GUI version has this
        gcl_free(H->workers[i].scat_vel);
        gcl_free(H->workers[i].scat_ori);
        gcl_free(H->workers[i].scat_tum);
        gcl_free(H->workers[i].scat_aux);
        gcl_free(H->workers[i].scat_rcs);
        gcl_free(H->workers[i].scat_sig);
        gcl_free(H->workers[i].work);
        gcl_free(H->workers[i].pulse);
        gcl_free(H->workers[i].scat_rnd);
    }
#else
    // Plain OpenCL build: release the cl_mem objects.
    for (i = 0; i < H->num_workers; i++) {
        clReleaseMemObject(H->workers[i].scat_pos);
        clReleaseMemObject(H->workers[i].scat_clr);
        clReleaseMemObject(H->workers[i].scat_vel);
        clReleaseMemObject(H->workers[i].scat_ori);
        clReleaseMemObject(H->workers[i].scat_tum);
        clReleaseMemObject(H->workers[i].scat_aux);
        clReleaseMemObject(H->workers[i].scat_rcs);
        clReleaseMemObject(H->workers[i].scat_sig);
        clReleaseMemObject(H->workers[i].work);
        clReleaseMemObject(H->workers[i].pulse);
        clReleaseMemObject(H->workers[i].scat_rnd);
    }
#endif
    if (H->verb > 2) {
        rsprint("Freeing CPU memories ...");
    }
    // Host-side mirrors of the scatterer attributes.
    // NOTE(review): no host-side scat_clr is freed here -- presumably it only
    // exists on the device for the GUI build; confirm.
    free(H->scat_pos);
    free(H->scat_vel);
    free(H->scat_ori);
    free(H->scat_tum);
    free(H->scat_aux);
    free(H->scat_rcs);
    free(H->scat_sig);
    free(H->scat_rnd);
    free(H->pulse);
    // One scratch pulse buffer per worker
    for (i = 0; i < H->num_workers; i++) {
        free(H->pulse_tmp[i]);
    }
}
//
// Releases everything held by the handle -- the LES (wind) and OBJ (object)
// sub-modules, all workers, the scatterer memory, the anchor arrays, the DSD
// tables -- and finally the handle itself.
//
void RS_free(RSHandle *H) {
    int i;
    // Keep a copy of the verbosity flag; H itself is freed below.
    char v = H->verb;
    // NOTE(review): H->L is released without a NULL check while H->O is
    // checked -- presumably LES_free() tolerates NULL; confirm.
    LES_free(H->L);
    if (H->O) {
        OBJ_free(H->O);
    }
    for (i = 0; i < H->num_workers; i++) {
        RS_worker_free(&H->workers[i]);
    }
    RS_free_scat_memory(H);
    free(H->anchor_pos);
    free(H->anchor_lines);
    // The four DSD tables are allocated together, so dsd_r != NULL implies
    // the other three are valid.
    if (H->dsd_r != NULL) {
        free(H->dsd_r);
        free(H->dsd_pdf);
        free(H->dsd_cdf);
        free(H->dsd_pop);
    }
    free(H);
    if (v) {
        rsprint("Resources released.");
    }
}
//
// Derives the kernel launch geometry for the two-pass pulse reduction.
//
// count               - number of scatterer entries to reduce
// group_size_multiple - the device's preferred work-group size multiple
// user_max_groups     - upper bound on the number of work groups (capped at 1024)
// max_local_mem_size  - device local memory budget in bytes
// range_start/delta   - range-gate geometry of the pulse
// range_count         - number of range gates (clamped to at least 1)
//
// Returns, for each pass, the entry count, group count, global/local work
// sizes and local memory requirement, plus which pass-2 kernel variant to use.
//
RSMakePulseParams RS_make_pulse_params(const cl_uint count,
                                       const cl_uint group_size_multiple,
                                       const cl_uint user_max_groups,
                                       const cl_uint max_local_mem_size,
                                       const float range_start,
                                       const float range_delta,
                                       const unsigned int range_count) {
    RSMakePulseParams param;
    // Keep a copy for reference
    param.num_scats = count;
    param.user_max_groups = user_max_groups;
    param.user_max_work_items = group_size_multiple;
    param.range_start = range_start;
    param.range_delta = range_delta;
    param.range_count = MAX(1, range_count);
    // The 2nd pass kernel functions are only for work_items <= 1024.
    if (user_max_groups > 1024) {
        fprintf(stderr, "%s : RS : I'm not programmed to handle user_max_groups > 1024.\n", now());
        param.user_max_groups = 1024;
    }
    // Work items is at most count / 2
    unsigned int work_items = count > param.user_max_work_items * 2 ? param.user_max_work_items : count / 2;
    //unsigned int work_items = count > param.user_max_work_items * 2 ? param.user_max_work_items : (count + 1) / 2;
    // Number of group item-pairs
    //unsigned int group_count = count <= work_items * 2 ? 1 : count / (work_items * 2);
    unsigned int group_count = count <= work_items * 2 ? 1 : (count + work_items * 2 - 1) / (work_items * 2);
    if (group_count > param.user_max_groups) {
        group_count = param.user_max_groups;
    }
    // printf("RS_make_pulse_params() count=%d work_items=%d group_count=%d/%d\n", count, work_items, group_count, param.user_max_groups);
    // 1st pass: each group reduces work_items * 2 entries into one partial
    // sum per range gate, held in local memory.
    param.entry_counts[0] = count;
    param.group_counts[0] = group_count;
    param.global[0] = group_count * work_items;
    param.local[0] = work_items;
    param.local_mem_size[0] = range_count * work_items * sizeof(cl_float4);
    // Halve the work items (doubling the groups) until the per-group local
    // memory fits the device budget.
    while (param.local_mem_size[0] > max_local_mem_size) {
#ifdef DEBUG_CL
        rsprint("Local memory size = %s. Adjusting ...", commaint((long long)max_local_mem_size));
#endif
        if (range_count % 2 == 0) {
            work_items /= 2;
            group_count *= 2;
            if (group_count > param.user_max_groups) {
                group_count = param.user_max_groups;
            }
            param.group_counts[0] = group_count;
            param.global[0] = group_count * work_items;
            param.local[0] = work_items;
            param.local_mem_size[0] = range_count * work_items * sizeof(cl_float4);
        } else {
            rsprint("ERROR: Could not resolve local memory size limits.");
            exit(EXIT_FAILURE);
        }
    }
    // 2nd pass: reduce the per-group partial sums down to one value per gate.
    unsigned int work_count = group_count * param.range_count;
    param.entry_counts[1] = work_count;
    work_items = work_count / (param.range_count * 2);
    if (work_items < 1) {
        fprintf(stderr, "%s : RS : 2nd pass with CL work_items = %u < 2?\n", now(), work_items);
        work_items = 1;
    }
    // Select the pass-2 kernel variant that matches the layout constraints.
    if (param.local[0] % param.range_count == 0 && group_size_multiple >= work_items) {
        //
        param.cl_pass_2_method = RS_CL_PASS_2_UNIVERSAL;
        param.group_counts[1] = 1;
        param.global[1] = work_items;
        param.local[1] = work_items;
        param.local_mem_size[1] = work_items * sizeof(cl_float4);
    } else if (group_count >= 2 * param.range_count && group_size_multiple >= work_items) {
        //
        param.cl_pass_2_method = RS_CL_PASS_2_IN_LOCAL;
        param.group_counts[1] = 1;
        param.global[1] = work_items;
        param.local[1] = work_items;
        param.local_mem_size[1] = work_items * sizeof(cl_float4);
    } else {
        // Fallback: one work item per range gate.
        param.cl_pass_2_method = RS_CL_PASS_2_IN_RANGE;
        param.group_counts[1] = 1;
        param.global[1] = param.range_count;
        param.local[1] = 1;
        param.local_mem_size[1] = sizeof(cl_float4);
    }
    if (param.entry_counts[1] > RS_MAX_GATES * work_items) {
        fprintf(stderr, "%s : RS : H->dev_work may not be large enough.\n", now());
    }
    return param;
}
#pragma mark -
#pragma mark Properties
// Sets the simulation concept flags (bitwise OR of RSSimulationConcept values).
void RS_set_concept(RSHandle *H, RSSimulationConcept c) {
    H->sim_concept = c;
}
//
// Returns a compact one-line summary of the active simulation concepts, one
// letter per active concept. The pointer refers to static storage -- not
// thread safe, do not free().
//
char *RS_simulation_concept_string(RSHandle *H) {
    static char string[32];
    // Concepts listed alphabetically by their letter code.
    const struct {
        RSSimulationConcept concept;
        char letter;
    } table[] = {
        {RSSimulationConceptBoundedParticleVelocity, 'B'},
        {RSSimulationConceptDebrisFluxFromVelocity, 'C'},
        {RSSimulationConceptDraggedBackground, 'D'},
        {RSSimulationConceptFixedScattererPosition, 'F'},
        {RSSimulationConceptTransparentBackground, 'T'},
        {RSSimulationConceptUniformDSDScaledRCS, 'U'},
        {RSSimulationConceptVerticallyPointingRadar, 'V'}
    };
    unsigned int j;
    char *c = string + sprintf(string, "Concepts used: ");
    for (j = 0; j < sizeof(table) / sizeof(table[0]); j++) {
        if (H->sim_concept & table[j].concept) {
            *c++ = table[j].letter;
        }
    }
    *c = '\0';
    return string;
}
//
// Returns a multi-line bulleted description of the active simulation
// concepts. The pointer refers to static storage -- not thread safe.
//
char *RS_simulation_concept_bulleted_string(RSHandle *H) {
    static char string[1024];
    // One bullet line per concept; RS_INDENT is concatenated at compile time.
    const struct {
        RSSimulationConcept concept;
        const char *line;
    } table[] = {
        {RSSimulationConceptBoundedParticleVelocity, RS_INDENT "o B - Bounded Particle Velocity\n"},
        {RSSimulationConceptDebrisFluxFromVelocity, RS_INDENT "o C - Debris Concentration from Velocity\n"},
        {RSSimulationConceptDraggedBackground, RS_INDENT "o D - Dragged Meteorological Scatterers\n"},
        {RSSimulationConceptFixedScattererPosition, RS_INDENT "o F - Fixed Scatterer Positions\n"},
        {RSSimulationConceptTransparentBackground, RS_INDENT "o T - Transparent Meteorological Scatterers\n"},
        {RSSimulationConceptUniformDSDScaledRCS, RS_INDENT "o U - Uniform DSD with Scaled RCS\n"},
        {RSSimulationConceptVerticallyPointingRadar, RS_INDENT "o V - Vertically Pointing Radar\n"}
    };
    unsigned int j;
    int len = sprintf(string, "Concepts used:\n");
    for (j = 0; j < sizeof(table) / sizeof(table[0]); j++) {
        if (H->sim_concept & table[j].concept) {
            len += sprintf(string + len, "%s", table[j].line);
        }
    }
    return string;
}
//
// Sets the pulse repetition time (s), mirrors it into the GPU-side simulation
// descriptor, and refreshes the derived parameters (prf, va, fn).
//
void RS_set_prt(RSHandle *H, const RSfloat prt) {
//    RSfloat tic_toc_left = H->sim_toc - H->sim_tic;
//
//    RSfloat toc_offset = H->params.prt / prt * tic_toc_left;
//
//    if (tic_toc_left + toc_offset < 0) {
//        H->sim_tic = 0;
//        H->sim_toc = (size_t)(H->vel_desc.tp / prt);
//    } else {
//        H->sim_toc += toc_offset;
//    }
    H->params.prt = prt;
    H->sim_desc.s[RSSimulationDescriptionPRT] = H->params.prt;
    RS_update_computed_properties(H);
}
//
// Sets the radar wavelength (m), stores the wavenumber 4 * pi / lambda in the
// GPU-side simulation descriptor, and refreshes the derived parameters.
//
void RS_set_lambda(RSHandle *H, const RSfloat lambda) {
    H->params.lambda = lambda;
    H->sim_desc.s[RSSimulationDescriptionWaveNumber] = 4.0f * M_PI / H->params.lambda;
    RS_update_computed_properties(H);
}
//
// Sets the scatterer density (bodies per resolution cell). Rejected once the
// simulation domain has been populated.
//
void RS_set_density(RSHandle *H, const RSfloat density) {
    if (H->status & RSStatusDomainPopulated) {
        rsprint("Simulation domain has been populated. Density cannot be changed.");
        return;
    }
//    if (H->status & RSStatusPopulationDefined) {
//        rsprint("ERROR: Population already defined. RS_set_density() should come before RS_set_scan_box().\n");
//        exit(EXIT_FAILURE);
//    }
    H->params.body_per_cell = density;
    RS_update_computed_properties(H);
}
//
// Sets the antenna beamwidth (deg) and gain (dBi), then rebuilds the standard
// angular weighting function from the new beamwidth. Rejected once the
// simulation domain has been populated.
//
void RS_set_antenna_params(RSHandle *H, RSfloat beamwidth_deg, RSfloat gain_dbi) {
    if (H->status & RSStatusDomainPopulated) {
        rsprint("Simulation domain has been populated. Radar antenna parameters cannot be changed.");
        return;
    }
    H->params.antenna_bw_deg = beamwidth_deg;
    H->params.antenna_gain_dbi = gain_dbi;
    // Derive antenna_bw_rad before handing it to the weighting function
    RS_update_computed_properties(H);
    RS_set_angular_weight_to_standard(H, H->params.antenna_bw_rad);
}
//
// Sets the transmit pulse width (s) and power (W), then rebuilds the triangle
// range weighting function from the new range resolution. Rejected once the
// simulation domain has been populated.
//
void RS_set_tx_params(RSHandle *H, RSfloat pulsewidth, RSfloat tx_power_watt) {
    if (H->status & RSStatusDomainPopulated) {
        rsprint("Simulation domain has been populated. Radar parameters cannot be changed.");
        return;
    }
    H->params.tau = pulsewidth;
    H->params.tx_power_watt = tx_power_watt;
    // Derive dr = c * tau / 2 before handing it to the weighting function
    RS_update_computed_properties(H);
    RS_set_range_weight_to_triangle(H, H->params.dr);
}
//
// Sets the sampling spacings: gate spacing (m) and ray spacings in azimuth
// and elevation (deg).
//
void RS_set_sampling_spacing(RSHandle *H, const RSfloat range, const RSfloat azimuth, const RSfloat elevation) {
    H->params.range_delta = range;
    H->params.azimuth_delta_deg = azimuth;
    H->params.elevation_delta_deg = elevation;
}
//
// Sets the scan extent from an RSBox (origin + size in range / azimuth /
// elevation), filling in default sampling spacings for any that are unset.
//
void RS_set_scan_box(RSHandle *H, RSBox box) {
    // Fall back to the defaults for spacings that are still zero
    if (H->params.range_delta == 0.0f) {
        H->params.range_delta = RS_PARAMS_GATEWIDTH;
    }
    if (H->params.azimuth_delta_deg == 0.0f) {
        H->params.azimuth_delta_deg = RS_PARAMS_BEAMWIDTH;
    }
    if (H->params.elevation_delta_deg == 0.0f) {
        H->params.elevation_delta_deg = RS_PARAMS_BEAMWIDTH;
    }
    // Convert origin + size into [start, end] per dimension
    const RSfloat range_end = box.origin.r + box.size.r;
    const RSfloat azimuth_end = box.origin.a + box.size.a;
    const RSfloat elevation_end = box.origin.e + box.size.e;
    RS_set_scan_extent(H,
                       box.origin.r, range_end, H->params.range_delta,
                       box.origin.a, azimuth_end, H->params.azimuth_delta_deg,
                       box.origin.e, elevation_end, H->params.elevation_delta_deg);
}
//
// Sets the scan extent and derives everything that depends on it: the padded
// work domain, the anchor points that outline the domain, the bounding box in
// the GPU-side simulation descriptor, and (via RS_revise_population()) the
// suggested number of scatterers based on the scatterer / resolution volume
// rule. Rejected once the simulation domain has been populated.
//
// All angles are in degrees, ranges in meters.
//
void RS_set_scan_extent(RSHandle *H,
                        RSfloat range_start, RSfloat range_end, RSfloat range_delta,
                        RSfloat azimuth_start, RSfloat azimuth_end, RSfloat azimuth_delta,
                        RSfloat elevation_start, RSfloat elevation_end, RSfloat elevation_delta) {
    if (H->status & RSStatusDomainPopulated) {
        rsprint("Simulation domain has been populated. Scan box cannot be changed.");
        return;
    }
    //rsprint("%.2f %.2f %.2f / %.2f %.2f %.2f\n", azimuth_start, azimuth_end, azimuth_delta, elevation_start, elevation_end, elevation_delta);
    // H->status &= !RSStatusDomainPopulated;
    H->params.range_start = range_start;
    H->params.range_end = range_end;
    H->params.range_delta = range_delta;
    H->params.azimuth_start_deg = azimuth_start;
    H->params.azimuth_end_deg = azimuth_end;
    H->params.azimuth_delta_deg = azimuth_delta;
    H->params.elevation_start_deg = elevation_start;
    H->params.elevation_end_deg = elevation_end;
    H->params.elevation_delta_deg = elevation_delta;
    // A 0-360 azimuth span is treated as a full 360-deg surveillance sweep
    bool is_full_sweep = fabs(azimuth_start - 0.0f) < 0.01f && fabs(azimuth_end - 360.0f) < 0.01f;
    // Work domain: user extent padded by domain_pad_factor resolution cells,
    // snapped to the sampling grid and clamped to physical limits
    const RSfloat r_lo = floor(MAX(H->params.range_delta, H->params.range_start - H->params.domain_pad_factor * H->params.dr) / H->params.range_delta) * H->params.range_delta;
    const RSfloat r_hi = ceil(MIN(10.0e3f, H->params.range_end + H->params.domain_pad_factor * H->params.dr) / H->params.range_delta) * H->params.range_delta;
    const RSfloat az_lo = is_full_sweep ? 0.0f : floor(H->params.azimuth_start_deg - H->params.domain_pad_factor * H->params.antenna_bw_deg) / H->params.antenna_bw_deg * H->params.antenna_bw_deg;
    const RSfloat az_hi = is_full_sweep ? 360.0f : ceil(H->params.azimuth_end_deg + H->params.domain_pad_factor * H->params.antenna_bw_deg) / H->params.antenna_bw_deg * H->params.antenna_bw_deg;
    const RSfloat el_lo = floor(MAX(0.0f, H->params.elevation_start_deg - H->params.domain_pad_factor * H->params.antenna_bw_deg) / H->params.antenna_bw_deg) * H->params.antenna_bw_deg;
    const RSfloat el_hi = ceil(MIN(90.0f, H->params.elevation_end_deg + H->params.domain_pad_factor * H->params.antenna_bw_deg) / H->params.antenna_bw_deg) * H->params.antenna_bw_deg;
    const RSfloat tiny = 1.0e-5f;
    // printf("range_start = %.2f r_lo = %.2f %.2f %.2f %.1f ... %.2f\n",
    //        H->params.range_start, r_lo, H->params.dr, H->params.range_delta, H->params.domain_pad_factor,
    //        MAX(H->params.range_delta, H->params.range_start - H->params.domain_pad_factor * H->params.dr));
    int nr = 0;
    int naz = 0;
    int nel = 0;
    // Cartesian bounding box of all anchors, accumulated below
    RSfloat
    xmin = INFINITY, xmax = -INFINITY,
    ymin = INFINITY, ymax = -INFINITY,
    zmin = INFINITY, zmax = -INFINITY;
    RSfloat r;
    RSfloat az;
    RSfloat el;
    int ii = 0;
    if (H->verb) {
        rsprint("Deriving scan box ... BW %.2f deg DR %.2f m\n", H->params.antenna_bw_deg, H->params.range_delta);
    }
    // Range: count the gates in the user extent
    r = floor(H->params.range_start / H->params.range_delta) * H->params.range_delta;
    while (r <= ceil(H->params.range_end / H->params.range_delta) * H->params.range_delta) {
        r += H->params.range_delta;
        nr++;
    }
    H->params.range_count = MIN(RS_MAX_GATES, nr);
    if (H->verb > 1) {
        rsprint("    o Domain range ... %.2f ~ %.2f (%d)\n", r_lo, r_hi, nr);
    }
    if (H->sim_concept & RSSimulationConceptVerticallyPointingRadar) {
        const RSfloat delta = MIN(elevation_delta, azimuth_delta);
        const RSfloat edge = el_lo;
        // Raster grid for AZ, EL: walk a square (xx, yy) raster and convert
        // each point into an (az, el) direction about the zenith
        if (H->verb) {
            rsprint("Spherical grid for a profiler: xx / yy = [ %.2f ... %.2f ] deg\n", edge - 90.0f, 90.0f - edge);
        }
        RSfloat xx;
        RSfloat yy = edge - 90.0f;
        // First pass only counts the grid points (ii)
        while (yy < (90.0f - edge) + tiny) {
            xx = el_lo - 90.0f;
            while (xx < (90.0f - edge) + tiny) {
                az = atan2f(yy, xx);
                el = 90.0f - sqrtf(yy * yy + xx * xx);
#if defined(DEBUG_POS)
                rsprint("ii %d xx = %.2f yy = %.2f AZ %.2f EL %.2f\n", naz, xx, yy, az, el);
#endif
                xx += delta;
                ii++;
            }
            yy += delta;
        }
        // Total number of anchors, add one for radar origin
        H->num_anchors = 2 * ii + 1;
        if (H->anchor_pos) {
            if (H->verb > 2) {
                rsprint("Freeing existing anchor memory.");
            }
            free(H->anchor_pos);
        }
        H->anchor_pos = (cl_float4 *)malloc(H->num_anchors * sizeof(cl_float4));
        if (H->anchor_pos == NULL) {
            rsprint("ERROR: Unable to allocate memory for anchors.");
            return;
        }
        // Now populate an actual array: one anchor at r_lo and one at r_hi
        // per grid direction, tracking the Cartesian bounding box
        ii = 0;
        yy = edge - 90.0f;
        while (yy < (90.0f - edge) + tiny) {
            xx = edge - 90.0f;
            while (xx < (90.0f - edge) + tiny) {
                az = atan2f(yy, xx);
                el = 90.0f - sqrtf(yy * yy + xx * xx);
#if defined(DEBUG_POS)
                rsprint("ii %d xx = %.2f yy = %.2f AZ %.2f EL %.2f\n", ii, xx, yy, az, el);
#endif
                el = el / 180.0f * M_PI;
                H->anchor_pos[ii].x = r_lo * cos(el) * sin(az);
                H->anchor_pos[ii].y = r_lo * cos(el) * cos(az);
                H->anchor_pos[ii].z = r_lo * sin(el);
                H->anchor_pos[ii].w = 1.0f;
                xmin = H->anchor_pos[ii].x < xmin ? H->anchor_pos[ii].x : xmin;
                xmax = H->anchor_pos[ii].x > xmax ? H->anchor_pos[ii].x : xmax;
                ymin = H->anchor_pos[ii].y < ymin ? H->anchor_pos[ii].y : ymin;
                ymax = H->anchor_pos[ii].y > ymax ? H->anchor_pos[ii].y : ymax;
                zmin = H->anchor_pos[ii].z < zmin ? H->anchor_pos[ii].z : zmin;
                zmax = H->anchor_pos[ii].z > zmax ? H->anchor_pos[ii].z : zmax;
                ii++;
                H->anchor_pos[ii].x = r_hi * cos(el) * sin(az);
                H->anchor_pos[ii].y = r_hi * cos(el) * cos(az);
                H->anchor_pos[ii].z = r_hi * sin(el);
                H->anchor_pos[ii].w = 1.0f;
                xmin = H->anchor_pos[ii].x < xmin ? H->anchor_pos[ii].x : xmin;
                xmax = H->anchor_pos[ii].x > xmax ? H->anchor_pos[ii].x : xmax;
                ymin = H->anchor_pos[ii].y < ymin ? H->anchor_pos[ii].y : ymin;
                ymax = H->anchor_pos[ii].y > ymax ? H->anchor_pos[ii].y : ymax;
                zmin = H->anchor_pos[ii].z < zmin ? H->anchor_pos[ii].z : zmin;
                zmax = H->anchor_pos[ii].z > zmax ? H->anchor_pos[ii].z : zmax;
                ii++;
                xx += delta;
            }
            yy += delta;
        }
        naz = 360;
        nel = 1;
        if (H->P) {
            POSPattern *pattern = (POSPattern *)H->P;
            nel = pattern->count;
        }
    } else {
        // Azimuth: count the rays in the padded work domain
        az = az_lo;
        while (az <= az_hi + tiny) {
            az += H->params.azimuth_delta_deg;
            naz++;
        }
        if (H->verb > 1) {
            rsprint("    o Domain azimuth ... %.2f ~ %.2f (%d)\n", az_lo, az_hi, naz);
        }
        // Elevation: count the rays in the padded work domain
        el = el_lo;
        while (el <= el_hi) {
            el += H->params.elevation_delta_deg;
            nel++;
        }
        if (H->verb > 1) {
            rsprint("    o Domain elevation ... %.2f ~ %.2f (%d)\n", el_lo, el_hi, nel);
        }
        // Zero volume
        if (naz == 0 || nel == 0) {
            // BUGFIX: the %d arguments were printed under the wrong labels
            // (NEL showed naz and vice versa); also fixed the "volumne" typo.
            rsprint("NAZ = %d and/or NEL = %d resulted in a zero volume.\n", naz, nel);
            return;
        }
        // Evaluate the number of scatterers needed
        H->num_anchors = 2 * naz * nel + 1; // Save one for radar origin
        if (H->anchor_pos) {
            if (H->verb > 2) {
                rsprint("Freeing existing anchor memory.");
            }
            free(H->anchor_pos);
        }
        H->anchor_pos = (cl_float4 *)malloc(H->num_anchors * sizeof(cl_float4));
        if (H->anchor_pos == NULL) {
            rsprint("ERROR: Unable to allocate memory for anchors.");
            return;
        }
        // Domain size: one anchor at r_lo and one at r_hi per (az, el) ray,
        // tracking the Cartesian bounding box
        el = el_lo / 180.0f * M_PI;
        while (el <= el_hi / 180.0f * M_PI + tiny && ii < H->num_anchors - 1) {
            az = az_lo / 180.0f * M_PI;
            while ((is_full_sweep && az < (az_hi - azimuth_delta) / 180.0f * M_PI) || (!is_full_sweep && az <= az_hi / 180.0f * M_PI + tiny && ii < H->num_anchors - 1)) {
                //rsprint("ii %d AZ %.2f EL %.2f\n", ii, az / M_PI * 180.0f, el / M_PI * 180.0f);
                H->anchor_pos[ii].x = r_lo * cos(el) * sin(az);
                H->anchor_pos[ii].y = r_lo * cos(el) * cos(az);
                H->anchor_pos[ii].z = r_lo * sin(el);
                H->anchor_pos[ii].w = 1.0f;
                xmin = H->anchor_pos[ii].x < xmin ? H->anchor_pos[ii].x : xmin;
                xmax = H->anchor_pos[ii].x > xmax ? H->anchor_pos[ii].x : xmax;
                ymin = H->anchor_pos[ii].y < ymin ? H->anchor_pos[ii].y : ymin;
                ymax = H->anchor_pos[ii].y > ymax ? H->anchor_pos[ii].y : ymax;
                zmin = H->anchor_pos[ii].z < zmin ? H->anchor_pos[ii].z : zmin;
                zmax = H->anchor_pos[ii].z > zmax ? H->anchor_pos[ii].z : zmax;
                ii++;
                H->anchor_pos[ii].x = r_hi * cos(el) * sin(az);
                H->anchor_pos[ii].y = r_hi * cos(el) * cos(az);
                H->anchor_pos[ii].z = r_hi * sin(el);
                H->anchor_pos[ii].w = 1.0f;
                xmin = H->anchor_pos[ii].x < xmin ? H->anchor_pos[ii].x : xmin;
                xmax = H->anchor_pos[ii].x > xmax ? H->anchor_pos[ii].x : xmax;
                ymin = H->anchor_pos[ii].y < ymin ? H->anchor_pos[ii].y : ymin;
                ymax = H->anchor_pos[ii].y > ymax ? H->anchor_pos[ii].y : ymax;
                zmin = H->anchor_pos[ii].z < zmin ? H->anchor_pos[ii].z : zmin;
                zmax = H->anchor_pos[ii].z > zmax ? H->anchor_pos[ii].z : zmax;
                ii++;
                az += azimuth_delta / 180.0f * M_PI;
            }
            el += elevation_delta / 180.0f * M_PI;
        }
    }
    if (H->verb) {
        rsprint("Anchors computed num_anchors = %d\n", H->num_anchors);
    }
    // Radar origin at (0, 0, 0); w = 5.0f (point size for rendering --
    // NOTE(review): presumably; confirm against the GUI draw path)
    H->anchor_pos[ii].x = 0.0f;
    H->anchor_pos[ii].y = 0.0f;
    H->anchor_pos[ii].z = 0.0f;
    H->anchor_pos[ii].w = 5.0f;
    //printf("H->num_anchors = %zu  ii = %d\n", H->num_anchors, ii);
    // The closing domain of the simulation
    H->sim_desc.s[RSSimulationDescriptionBoundSizeX] = xmax - xmin;
    H->sim_desc.s[RSSimulationDescriptionBoundSizeY] = ymax - ymin;
    H->sim_desc.s[RSSimulationDescriptionBoundSizeZ] = zmax - zmin;
    H->sim_desc.s[RSSimulationDescriptionBoundOriginX] = xmin;
    H->sim_desc.s[RSSimulationDescriptionBoundOriginY] = ymin;
    H->sim_desc.s[RSSimulationDescriptionBoundOriginZ] = zmin;
    // Human-readable summary of the user / work domains
    sprintf(H->summary,
            "User domain @\n R:[ %6.2f ~ %6.2f ] km\n E:[ %6.2f ~ %6.2f ] deg\n A:[ %+7.2f ~ %+7.2f ] deg\n",
            1.0e-3 * H->params.range_start, 1e-3 * H->params.range_end,
            H->params.elevation_start_deg, H->params.elevation_end_deg,
            H->params.azimuth_start_deg, H->params.azimuth_end_deg);
    sprintf(H->summary + strlen(H->summary),
            "Work domain @\n R:[ %6.2f ~ %6.2f ] km (%d gates)\n E:[ %6.2f ~ %6.2f ] deg (%d rays)\n A:[ %+7.2f ~ %+7.2f ] deg (%d rays)\n",
            1.0e-3 * r_lo, 1.0e-3 * r_hi, H->params.range_count,
            el_lo, el_hi, nel,
            az_lo, az_hi, H->sim_concept & RSSimulationConceptVerticallyPointingRadar ? nel : naz);
    sprintf(H->summary + strlen(H->summary),
            "==\n X:[ %7.2f ~ %7.2f ] (%.2f) m\n Y:[ %7.2f ~ %7.2f ] (%.2f) m\n Z:[ %7.2f ~ %7.2f ] (%.2f) m\n",
            xmin, xmax, H->sim_desc.s[RSSimulationDescriptionBoundSizeX],
            ymin, ymax, H->sim_desc.s[RSSimulationDescriptionBoundSizeY],
            zmin, zmax, H->sim_desc.s[RSSimulationDescriptionBoundSizeZ]);
    sprintf(H->summary + strlen(H->summary),
            "Concepts used: %s\n", RS_simulation_concept_string(H));
    if (H->verb) {
        rsprint("User domain @ R:[ %5.2f ~ %5.2f ] km  E:[ %5.2f ~ %5.2f ] deg  A:[ %+7.2f ~ %+7.2f ] deg\n",
                1.0e-3 * H->params.range_start, 1e-3 * H->params.range_end,
                H->params.elevation_start_deg, H->params.elevation_end_deg,
                H->params.azimuth_start_deg, H->params.azimuth_end_deg);
        rsprint("Work domain @ R:[ %5.2f ~ %5.2f ] km  E:[ %5.2f ~ %5.2f ] deg  A:[ %+7.2f ~ %+7.2f ] deg\n",
                1.0e-3 * r_lo, 1.0e-3 * r_hi,
                el_lo, el_hi,
                az_lo, az_hi);
        rsprint("            @ R:[ %-3d ] E:[ %-3d ] A:[ %-3d ]",
                H->params.range_count, nel, naz);
        rsprint("            @ X:[ %.2f ~ %.2f ] m  Y:[ %.2f ~ %.2f ] m  Z:[ %.2f ~ %.2f ] m\n",
                xmin, xmax,
                ymin, ymax,
                zmin, zmax);
        rsprint("            = ( %.2f m x %.2f m x %.2f m )\n",
                xmax - xmin, ymax - ymin, zmax - zmin);
        if (H->sim_concept == RSSimulationConceptNull) {
            rsprint("No special concepts are active.\n");
        } else {
            rsprint("%s", RS_simulation_concept_bulleted_string(H));
        }
    }
    RS_revise_population(H);
    // Anchor lines to show the volume of interest, which was set by the user. The number is well more than enough
    H->num_anchor_lines = 8 * (naz + nel);
    if (H->anchor_lines) {
        if (H->verb > 2) {
            printf("%s : RS : Freeing existing anchor_line memory.\n", now());
        }
        free(H->anchor_lines);
    }
    H->anchor_lines = (cl_float4 *)malloc(H->num_anchor_lines * sizeof(cl_float4));
    if (H->anchor_lines == NULL) {
        rsprint("ERROR: Unable to allocate memory for anchor_lines.");
        return;
    }
    ii = 0;
    //
    //          o----(4)----o    (end)
    //         /:          /|
    //        / :         / |
    //     (B)(7)      (C)(8)          EL
    //      /   :       /   |
    //     /    :      /    |
    //    o----(3)---[2]....o  (end)   (start)
    //    |    /      |    /
    //    |   /       |   /
    //   (5)(9)      (6)(A)        R
    //    |  /        |  /
    //    | /         | /
    //    o----(1)----o    (start)
    //
    //  (start)   AZ  (end)
    //
    if (H->params.elevation_start_deg < 90.0f) {
        // Line 1: lower-elevation arc at range_start
        el = H->params.elevation_start_deg / 180.0 * M_PI;
        az = H->params.azimuth_start_deg / 180.0 * M_PI;
        while (az < (H->params.azimuth_end_deg - H->params.azimuth_delta_deg) / 180.0 * M_PI + tiny && ii < H->num_anchor_lines - 1) {
            H->anchor_lines[ii].x = H->params.range_start * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_start * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_start * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
            az += H->params.azimuth_delta_deg / 180.0 * M_PI;
            H->anchor_lines[ii].x = H->params.range_start * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_start * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_start * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
        }
        // Line 2: lower-elevation arc at range_end
        el = H->params.elevation_start_deg / 180.0 * M_PI;
        az = H->params.azimuth_start_deg / 180.0 * M_PI;
        while (az < (H->params.azimuth_end_deg - H->params.azimuth_delta_deg) / 180.0 * M_PI + tiny && ii < H->num_anchor_lines - 1) {
            H->anchor_lines[ii].x = H->params.range_end * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_end * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_end * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
            az += H->params.azimuth_delta_deg / 180.0 * M_PI;
            H->anchor_lines[ii].x = H->params.range_end * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_end * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_end * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
        }
    }
    if (H->params.elevation_end_deg < 90.0f) {
        // Line 3: upper-elevation arc at range_start
        el = H->params.elevation_end_deg / 180.0 * M_PI;
        az = H->params.azimuth_start_deg / 180.0 * M_PI;
        while (az < (H->params.azimuth_end_deg - H->params.azimuth_delta_deg) / 180.0 * M_PI + tiny && ii < H->num_anchor_lines - 1) {
            H->anchor_lines[ii].x = H->params.range_start * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_start * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_start * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
            az += H->params.azimuth_delta_deg / 180.0 * M_PI;
            H->anchor_lines[ii].x = H->params.range_start * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_start * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_start * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
        }
        // Line 4: upper-elevation arc at range_end
        el = H->params.elevation_end_deg / 180.0 * M_PI;
        az = H->params.azimuth_start_deg / 180.0 * M_PI;
        while (az < (H->params.azimuth_end_deg - H->params.azimuth_delta_deg) / 180.0 * M_PI + tiny && ii < H->num_anchor_lines - 1) {
            H->anchor_lines[ii].x = H->params.range_end * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_end * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_end * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
            az += H->params.azimuth_delta_deg / 180.0 * M_PI;
            H->anchor_lines[ii].x = H->params.range_end * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_end * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_end * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
        }
        // Line 5: elevation arc at azimuth_start, range_start
        el = H->params.elevation_start_deg / 180.0 * M_PI;
        az = H->params.azimuth_start_deg / 180.0 * M_PI;
        while (el < (H->params.elevation_end_deg - H->params.elevation_delta_deg) / 180.0 * M_PI + tiny && ii < H->num_anchor_lines - 1) {
            H->anchor_lines[ii].x = H->params.range_start * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_start * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_start * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
            el += H->params.elevation_delta_deg / 180.0 * M_PI;
            H->anchor_lines[ii].x = H->params.range_start * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_start * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_start * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
        }
        // Line 6: elevation arc at azimuth_end, range_start
        el = H->params.elevation_start_deg / 180.0 * M_PI;
        az = H->params.azimuth_end_deg / 180.0 * M_PI;
        while (el < (H->params.elevation_end_deg - H->params.elevation_delta_deg) / 180.0 * M_PI + tiny && ii < H->num_anchor_lines - 1) {
            H->anchor_lines[ii].x = H->params.range_start * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_start * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_start * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
            el += H->params.elevation_delta_deg / 180.0 * M_PI;
            H->anchor_lines[ii].x = H->params.range_start * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_start * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_start * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
        }
        // Line 7: elevation arc at azimuth_start, range_end
        el = H->params.elevation_start_deg / 180.0 * M_PI;
        az = H->params.azimuth_start_deg / 180.0 * M_PI;
        while (el < (H->params.elevation_end_deg - H->params.elevation_delta_deg) / 180.0 * M_PI + tiny && ii < H->num_anchor_lines - 1) {
            H->anchor_lines[ii].x = H->params.range_end * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_end * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_end * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
            el += H->params.elevation_delta_deg / 180.0 * M_PI;
            H->anchor_lines[ii].x = H->params.range_end * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_end * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_end * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
        }
        // Line 8: elevation arc at azimuth_end, range_end
        el = H->params.elevation_start_deg / 180.0 * M_PI;
        az = H->params.azimuth_end_deg / 180.0 * M_PI;
        while (el < (H->params.elevation_end_deg - H->params.elevation_delta_deg) / 180.0 * M_PI + tiny && ii < H->num_anchor_lines - 1) {
            H->anchor_lines[ii].x = H->params.range_end * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_end * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_end * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
            el += H->params.elevation_delta_deg / 180.0 * M_PI;
            H->anchor_lines[ii].x = H->params.range_end * cos(el) * sin(az);
            H->anchor_lines[ii].y = H->params.range_end * cos(el) * cos(az);
            H->anchor_lines[ii].z = H->params.range_end * sin(el);
            H->anchor_lines[ii].w = 1.0;
            ii++;
        }
    }
    if (!is_full_sweep) {
        // Lines 9, A, B, C: the four radial edges (omitted on a 360-deg sweep
        // where the azimuth boundaries coincide)
        // Line 9
        el = H->params.elevation_start_deg / 180.0 * M_PI;
        az = H->params.azimuth_start_deg / 180.0 * M_PI;
        H->anchor_lines[ii].x = H->params.range_start * cos(el) * sin(az);
        H->anchor_lines[ii].y = H->params.range_start * cos(el) * cos(az);
        H->anchor_lines[ii].z = H->params.range_start * sin(el);
        H->anchor_lines[ii].w = 1.0;
        ii++;
        H->anchor_lines[ii].x = H->params.range_end * cos(el) * sin(az);
        H->anchor_lines[ii].y = H->params.range_end * cos(el) * cos(az);
        H->anchor_lines[ii].z = H->params.range_end * sin(el);
        H->anchor_lines[ii].w = 1.0;
        ii++;
        // Line A
        el = H->params.elevation_start_deg / 180.0 * M_PI;
        az = H->params.azimuth_end_deg / 180.0 * M_PI;
        H->anchor_lines[ii].x = H->params.range_start * cos(el) * sin(az);
        H->anchor_lines[ii].y = H->params.range_start * cos(el) * cos(az);
        H->anchor_lines[ii].z = H->params.range_start * sin(el);
        H->anchor_lines[ii].w = 1.0;
        ii++;
        H->anchor_lines[ii].x = H->params.range_end * cos(el) * sin(az);
        H->anchor_lines[ii].y = H->params.range_end * cos(el) * cos(az);
        H->anchor_lines[ii].z = H->params.range_end * sin(el);
        H->anchor_lines[ii].w = 1.0;
        ii++;
        // Line B
        el = H->params.elevation_end_deg / 180.0 * M_PI;
        az = H->params.azimuth_start_deg / 180.0 * M_PI;
        H->anchor_lines[ii].x = H->params.range_start * cos(el) * sin(az);
        H->anchor_lines[ii].y = H->params.range_start * cos(el) * cos(az);
        H->anchor_lines[ii].z = H->params.range_start * sin(el);
        H->anchor_lines[ii].w = 1.0;
        ii++;
        H->anchor_lines[ii].x = H->params.range_end * cos(el) * sin(az);
        H->anchor_lines[ii].y = H->params.range_end * cos(el) * cos(az);
        H->anchor_lines[ii].z = H->params.range_end * sin(el);
        H->anchor_lines[ii].w = 1.0;
        ii++;
        // Line C
        el = H->params.elevation_end_deg / 180.0 * M_PI;
        az = H->params.azimuth_end_deg / 180.0 * M_PI;
        H->anchor_lines[ii].x = H->params.range_start * cos(el) * sin(az);
        H->anchor_lines[ii].y = H->params.range_start * cos(el) * cos(az);
        H->anchor_lines[ii].z = H->params.range_start * sin(el);
        H->anchor_lines[ii].w = 1.0;
        ii++;
        H->anchor_lines[ii].x = H->params.range_end * cos(el) * sin(az);
        H->anchor_lines[ii].y = H->params.range_end * cos(el) * cos(az);
        H->anchor_lines[ii].z = H->params.range_end * sin(el);
        H->anchor_lines[ii].w = 1.0;
        ii++;
    }
    // printf("num_anchor_lines = %zu  ii = %d\n", H->num_anchor_lines, ii);
    // Trim the count down to the number of vertices actually emitted
    H->num_anchor_lines = ii;
    return;
}
void RS_set_beam_pos(RSHandle *H, RSfloat az_deg, RSfloat el_deg) {
    // Derive the unit pointing vector of the beam from azimuth / elevation (degrees)
    const float el_rad = el_deg / 180.0f * M_PI;
    const float az_rad = az_deg / 180.0f * M_PI;
    H->sim_desc.s[RSSimulationDescriptionBeamUnitX] = cosf(el_rad) * sinf(az_rad);
    H->sim_desc.s[RSSimulationDescriptionBeamUnitY] = cosf(el_rad) * cosf(az_rad);
    H->sim_desc.s[RSSimulationDescriptionBeamUnitZ] = sinf(el_rad);
    // Moving the beam invalidates the cached debris RCS and scatterer signals
    H->status |= RSStatusDebrisRCSNeedsUpdate;
    H->status |= RSStatusScattererSignalNeedsUpdate;
}
void RS_set_verbosity(RSHandle *H, const char verb) {
    // Set the console verbosity level; higher values enable more diagnostic output.
    H->verb = verb;
}
void RS_set_debris_count(RSHandle *H, const int debris_id, const size_t count) {
    // Set the population of one debris type. Slot 0 is reserved for the background
    // (meteorological) scatterers; when the domain is already populated, slot 0 is
    // rebalanced here so the overall population stays constant.
    int i;
    if (debris_id == 0) {
        printf("%s : RS : RS_set_debris_count() cannot have debris = 0.\n", now());
        return;
    }
    // Account for hydrometeors -> debris
    if (H->status & RSStatusDomainPopulated) {
        // delta is computed in unsigned arithmetic: a decrease wraps, but the
        // subtraction below wraps back, so counts[0] still lands on the right value.
        size_t delta = count - H->counts[debris_id];
        // NOTE(review): commaint() is called twice in one statement — if it formats
        // into a shared static buffer, both fields print the same string; verify.
        rsprint("Readjusting scatterer[0] %s -> %s", commaint(H->counts[0]), commaint(H->counts[0] - delta));
        H->counts[0] -= delta;
    }
    H->counts[debris_id] = count;
    // Always start with one as the background scatterers are always there
    H->num_types = 1;
    for (i = 1; i < RS_MAX_DEBRIS_TYPES; i++) {
        if (H->counts[i] > 0) {
            H->num_types++;
        }
    }
    if (H->verb > 2) {
        rsprint("Total number of body types = %d", (int)H->num_types);
    }
    // If the simulation has already started ticking, the worker partitioning
    // (origins / offsets) must be rebuilt to reflect the new populations.
    if (H->sim_tic > 0.0f) {
        RS_update_origins_offsets(H);
#if defined (_USE_GCL_)
        RS_derive_ndranges(H);
#endif
    }
}
void RS_revise_population(RSHandle *H) {
    // Derive the total scatterer population from the radar geometry, round the
    // background population toward a GPU-preferred multiple, and fold in the
    // per-type debris counts. Sets RSStatusPopulationDefined on completion.
    int ii;
    // Get GPU preferred multiplication factor
    // NOTE: make_pulse_pass_1 uses 2 x max_work_group_size stride
    size_t max_work_group_size;
    clGetDeviceInfo(H->workers[0].dev, CL_DEVICE_MAX_WORK_GROUP_SIZE, sizeof(max_work_group_size), &max_work_group_size, NULL);
    const size_t mul = H->num_cus[0] * H->num_workers * max_work_group_size * 2;
    if (H->sim_concept & RSSimulationConceptVerticallyPointingRadar) {
        size_t anchors_per_plane = (H->num_anchors - 1) / 2;
        H->num_scats = H->params.range_count * anchors_per_plane;
        H->counts[0] = H->num_scats;
        // Round the total population to a GPU preferred number
        //size_t preferred_n = (size_t)((H->num_scats + mul - 1) / mul) * mul;
        // Revise the background (rain)
        //H->counts[0] = preferred_n;
        // anchors_per_plane is an integer count: use the integer formatter, matching
        // the console print below (was commafloat, which expects a float).
        sprintf(H->summary + strlen(H->summary),
                "Anchors / Plane = %s\n", commaint(anchors_per_plane));
        if (H->verb) {
            rsprint("Anchors / Plane = %s\n", commaint(anchors_per_plane));
        }
    } else {
        // Volume of a single resolution cell at the start of the domain (svol = smallest volume)
        RSfloat r = H->params.range_start;
        RSfloat svol = (H->params.antenna_bw_rad * r) * (H->params.antenna_bw_rad * r) * (H->params.c * H->params.tau * 0.5f);
        RSfloat nvol = H->sim_desc.s[RSSimulationDescriptionBoundSizeX]
                     * H->sim_desc.s[RSSimulationDescriptionBoundSizeY]
                     * H->sim_desc.s[RSSimulationDescriptionBoundSizeZ] / svol;
        // Suggest a number of scatter bodies to use
        H->num_scats = (size_t)(H->params.body_per_cell * nvol);
        H->counts[0] = H->num_scats;
        // Round the total population to a GPU preferred number
        size_t preferred_n = H->num_scats;
        if (H->num_scats > 50000) {
            preferred_n = (size_t)(H->num_scats / mul) * mul;
            // NOTE(review): this compares the total population against body_per_cell
            // (a per-cell density); confirm whether H->num_scats * 9 / 10 was intended.
            if (preferred_n < H->params.body_per_cell * 9 / 10) {
                preferred_n += mul;
            }
        }
        // Revise the background (rain)
        H->counts[0] = preferred_n;
        sprintf(H->summary + strlen(H->summary),
                "nvol = %s x volumes of %s m^3\n", commafloat(nvol), commafloat(svol));
        sprintf(H->summary + strlen(H->summary),
                "Average meteor. scatterer density = %s\n", commafloat((float)H->counts[0] / nvol));
        if (H->verb) {
            rsprint("nvol = %s x volumes of %s m^3\n", commafloat(nvol), commafloat(svol));
            rsprint("Setting to GPU preferred %s", commaint(preferred_n));
            rsprint("Average all-type scatterer density = %s scatterers / closest radar cell\n", commafloat((float)preferred_n / nvol));
            rsprint("Average meteorological scatterer density = %s scatterers / closest radar cell\n", commafloat((float)H->counts[0] / nvol));
        }
    }
    // Add in the debris
    H->num_scats = H->counts[0];
    for (ii = 1; ii < RS_MAX_DEBRIS_TYPES; ii++) {
        if (H->counts[ii] > 0) {
            H->num_scats += H->counts[ii];
        }
    }
    // A hard limit here. Will do something about this next time.
    if (H->num_scats > 20000000) {
        rsprint("Too many scatterers (%s).\n", commaint(H->num_scats));
        // This is an error, not a normal termination: exit with a failure status
        // (was exit(0)), consistent with the other fatal paths in this file.
        exit(EXIT_FAILURE);
    }
    // The population count is now considered defined
    H->status |= RSStatusPopulationDefined;
}
void RS_revise_debris_counts_to_gpu_preference(RSHandle *H) {
    // Round every non-zero population up to the next multiple of the GPU-preferred size.
    int k;
    for (k = 0; k < RS_MAX_DEBRIS_TYPES; k++) {
        if (H->counts[k] == 0) {
            continue;
        }
        const size_t multiple = H->preferred_multiple;
        H->counts[k] = ((H->counts[k] + multiple - 1) / multiple) * multiple;
    }
}
size_t RS_get_debris_count(RSHandle *H, const int debris_id) {
    // Return the global population of one debris type (0 = background scatterers).
    return H->counts[debris_id];
}
size_t RS_get_worker_debris_count(RSHandle *H, const int debris_id, const int worker_id) {
    // Return the population of one debris type assigned to a specific worker.
    return H->workers[worker_id].counts[debris_id];
}
size_t RS_get_all_worker_debris_counts(RSHandle *H, const int debris_id, size_t counts[]) {
    // Gather the per-worker populations of one debris type into counts[]
    // (caller provides room for num_workers entries); returns the global total.
    int w = 0;
    while (w < H->num_workers) {
        counts[w] = H->workers[w].counts[debris_id];
        w++;
    }
    return H->counts[debris_id];
}
RSVolume RS_get_domain(RSHandle *H) {
    // Snapshot the simulation bounding box (origin + size) from the simulation descriptor.
    RSVolume volume;
    volume.origin.x = H->sim_desc.s[RSSimulationDescriptionBoundOriginX];
    volume.origin.y = H->sim_desc.s[RSSimulationDescriptionBoundOriginY];
    volume.origin.z = H->sim_desc.s[RSSimulationDescriptionBoundOriginZ];
    volume.size.x = H->sim_desc.s[RSSimulationDescriptionBoundSizeX];
    volume.size.y = H->sim_desc.s[RSSimulationDescriptionBoundSizeY];
    volume.size.z = H->sim_desc.s[RSSimulationDescriptionBoundSizeZ];
    return volume;
}
void RS_update_origins_offsets(RSHandle *H) {
    // Partition the global scatterer population across workers: assign each worker
    // a contiguous chunk (offset + num_scats), split every debris type across
    // workers, and compute each type's origin within a worker's chunk.
    int i, k;
    size_t count = H->num_scats;
    if (H->num_workers == 0) {
        rsprint("ERROR: Number of workers = 0.");
        exit(EXIT_FAILURE);
    }
    // Divide the scatter bodies into (num_workers) chunks
    // NOTE(review): integer division truncates — if num_scats is not a multiple of
    // num_workers the per-worker chunks do not sum to num_scats; confirm callers
    // guarantee divisibility (e.g. via the GPU-preferred rounding).
    const size_t sub_num_scats = H->num_scats / MAX(1, H->num_workers);
    size_t offset = 0;
    for (i = 0; i < H->num_workers; i++) {
        H->offset[i] = offset;
        H->workers[i].num_scats = sub_num_scats;
        if (H->verb > 2) {
            rsprint("workers[%d] num_scats = %s offset = %s", i, commaint(sub_num_scats), commaint(H->offset[i]));
        }
        offset += sub_num_scats;
    }
    // Sanity check: subtracting all debris populations from the total must leave
    // exactly the background population in slot 0.
    k = RS_MAX_DEBRIS_TYPES;
    while (k > 1) {
        k--;
        count -= H->counts[k];
    }
    if (H->counts[0] != count) {
        rsprint("ERROR: Inconsistent debris counts.");
        printf(RS_INDENT "o sub_num_scats = %s\n", commaint(sub_num_scats));
        printf(RS_INDENT "o population[0] = %s != count = %s\n", commaint(H->counts[0]), commaint(count));
        for (k = 1; k < H->num_types; k++) {
            printf(RS_INDENT "o population[%d] = %s\n", k, commaint(H->counts[k]));
        }
        exit(EXIT_FAILURE);
    }
    // Volume of a single resolution cell at the start of the domain (svol = smallest volume)
    RSfloat r = H->params.range_start;
    RSfloat svol = (H->params.antenna_bw_rad * r) * (H->params.antenna_bw_rad * r) * (H->params.c * H->params.tau * 0.5f);
    RSfloat nvol = (H->sim_desc.s[RSSimulationDescriptionBoundSizeX] * H->sim_desc.s[RSSimulationDescriptionBoundSizeY] * H->sim_desc.s[RSSimulationDescriptionBoundSizeZ]) / svol;
    if (H->verb) {
        rsprint("RS : Population details:");
        for (k = 0; k < RS_MAX_DEBRIS_TYPES; k++) {
            if (H->counts[k] == 0) {
                break;
            }
            printf(RS_INDENT "o Global population[%d] = %s (%s scatterers / resolution cell)\n", k, commaint(H->counts[k]), commafloat((float)H->counts[k] / nvol));
        }
    }
    // Split every debris type across the workers; the last worker absorbs the remainder.
    k = RS_MAX_DEBRIS_TYPES;
    while (k > 0) {
        k--;
        size_t debris_count_left = H->counts[k];
        if (debris_count_left == 0) {
            for (i = 0; i < H->num_workers; i++) {
                H->workers[i].counts[k] = 0;
            }
            continue;
        }
        // Groups of debris types
        // NOTE(review): the toggle biases the per-worker share up or down by type
        // index; its exact intent isn't documented here — confirm before changing.
        size_t round_up_down_toggle = H->num_workers > 1 ? k % H->num_workers : k;
        size_t sub_counts = (H->counts[k] + round_up_down_toggle) / H->num_workers;
        for (i = 0; i < H->num_workers - 1; i++) {
            H->workers[i].counts[k] = sub_counts;
            debris_count_left -= sub_counts;
        }
        // The last worker gets all the remainders
        H->workers[i].counts[k] = debris_count_left;
    }
    // Within each worker's chunk, lay the debris types out back-to-front so that
    // origins[k] marks where type k starts; the space below the last origin is
    // left for the background scatterers.
    for (i = 0; i < H->num_workers; i++) {
        k = RS_MAX_DEBRIS_TYPES;
        size_t origin = H->workers[i].num_scats;
        while (k > 1) {
            k--;
            if (H->workers[i].counts[k] == 0) {
                continue;
            }
            origin -= H->workers[i].counts[k];
            H->workers[i].origins[k] = origin;
        }
    }
    if (H->verb > 2) {
        for (i = 0; i < H->num_workers; i++) {
            rsprint("RS : workers[%d] with total population %s offset %s\n", i, commaint(H->workers[i].num_scats), commaint(H->offset[i]));
            for (k = 0; k < H->num_types; k++) {
                printf(RS_INDENT "o Local population[%d] - [ %9s, %9s, %9s ]\n", k,
                       commaint(H->workers[i].origins[k]),
                       commaint(H->workers[i].counts[k]),
                       commaint(H->workers[i].origins[k] + H->workers[i].counts[k]));
            }
        }
    }
}
void RS_set_dsd(RSHandle *H, const float *nd, const float *diameters, const int count, const char name) {
    // Install a user-supplied drop size distribution (DSD).
    //   nd        - concentration per bin (drops / m^3)
    //   diameters - drop diameter per bin (m); stored internally as radii
    //   count     - number of bins, must be > 0
    //   name      - DSD model identifier (an RSDropSizeDistribution* code)
    // No-op once the domain has been populated.
    if (H->status & RSStatusDomainPopulated) {
        rsprint("Simulation domain has been populated. DSD cannot be changed.");
        return;
    }
    int i;
    if (count <= 0) {
        printf(" %s : RS : DSD bin count cannot be 0.\n", now());
        return;
    }
    H->dsd_name = name;
    H->dsd_count = count;
    // Release any previously installed DSD tables (the four arrays are always
    // allocated together). NULL them so a failure below cannot leave dangling
    // pointers in the handle.
    if (H->dsd_r != NULL) {
        free(H->dsd_r);
        free(H->dsd_pdf);
        free(H->dsd_cdf);
        free(H->dsd_pop);
        H->dsd_r = NULL;
        H->dsd_pdf = NULL;
        H->dsd_cdf = NULL;
        H->dsd_pop = NULL;
    }
    // Derive concentration to pdf
    RSfloat *pdf = (RSfloat *)malloc(count * sizeof(RSfloat));
    if (pdf == NULL) {
        rsprint("ERROR: Unable to allocate memory for DSD parameterization.");
        H->dsd_count = 0;
        return;
    }
    H->dsd_nd_sum = 0.0f;
    for (i = 0; i < count; i++) {
        H->dsd_nd_sum += nd[i];
    }
    // Guard against a zero (or negative) total: the normalization below would
    // otherwise divide by zero and fill the pdf with NaN / Inf.
    if (H->dsd_nd_sum <= 0.0f) {
        rsprint("ERROR: DSD concentrations must sum to a positive value.");
        free(pdf);
        H->dsd_count = 0;
        return;
    }
    for (i = 0; i < count; i++) {
        pdf[i] = nd[i] / H->dsd_nd_sum;
    }
    // Total drops
    rsprint("Drop concentration ~ %s drops / m^3", commaint(H->dsd_nd_sum));
    H->dsd_r = (RSfloat *)malloc(count * sizeof(RSfloat));
    H->dsd_pdf = (RSfloat *)malloc(count * sizeof(RSfloat));
    H->dsd_cdf = (RSfloat *)malloc(count * sizeof(RSfloat));
    H->dsd_pop = (size_t *)malloc(count * sizeof(size_t));
    if (H->dsd_r == NULL || H->dsd_pdf == NULL || H->dsd_cdf == NULL || H->dsd_pop == NULL) {
        rsprint("ERROR: Unable to allocate memory for DSD parameterization.");
        // Free whichever allocations succeeded (free(NULL) is a no-op) and leave
        // the handle in a consistent, empty state instead of leaking them.
        free(H->dsd_r);
        free(H->dsd_pdf);
        free(H->dsd_cdf);
        free(H->dsd_pop);
        H->dsd_r = NULL;
        H->dsd_pdf = NULL;
        H->dsd_cdf = NULL;
        H->dsd_pop = NULL;
        H->dsd_count = 0;
        free(pdf);
        return;
    }
    memset(H->dsd_r, 0, count * sizeof(RSfloat));
    memset(H->dsd_pdf, 0, count * sizeof(RSfloat));
    memset(H->dsd_cdf, 0, count * sizeof(RSfloat));
    memset(H->dsd_pop, 0, count * sizeof(size_t));
    // cdf[i] is the cumulative probability BELOW bin i (starts at 0)
    RSfloat lo = 0.0f;
    for (i = 0; i < count; i++) {
        H->dsd_r[i] = 0.5f * diameters[i];
        H->dsd_pdf[i] = pdf[i];
        H->dsd_cdf[i] = lo;
        lo += pdf[i];
    }
    if (H->verb > 1) {
        printf("%s : RS : User set DSD specifications:\n", now());
        for (i = 0; i < MIN(MAX(count - 2, 1), 3); i++) {
            printf(RS_INDENT "o %.2f mm - PDF %.4f / TH %.4f\n", 2000.0f * H->dsd_r[i], H->dsd_pdf[i], H->dsd_cdf[i]);
        }
        if (count > 5) {
            printf(RS_INDENT "o : - : / :\n");
            printf(RS_INDENT "o : - : / :\n");
            i = MAX(4, count - 1);
        }
        for (; i < count; i++) {
            printf(RS_INDENT "o %.2f mm - PDF %.4f / TH %.4f\n", 2000.0f * H->dsd_r[i], H->dsd_pdf[i], H->dsd_cdf[i]);
        }
    }
    free(pdf);
}
void RS_set_dsd_to_mp_with_sizes(RSHandle *H, const float *ds, const int count) {
    // Install a Marshall-Palmer (1948) exponential DSD evaluated at the supplied
    // drop diameters ds[] (m): n(D) = N0 * exp(-lambda * D).
    int i;
    float d;
    H->dsd_n0 = 8000.0f;             // Marshall-Palmer 1948, mu = 0.08 cm^-4 = 8000 m^-3 m^-1
    H->dsd_lambda = 2.3f * 1000.0f;  // Let's say rainrate of ~15 mm hr^-1, lambda = 41 R ^-0.21 = 2.3
    RSfloat *n = (RSfloat *)malloc(count * sizeof(RSfloat));
    if (n == NULL) {
        // Previously dereferenced NULL on allocation failure
        rsprint("ERROR: Unable to allocate memory for DSD concentration curve.");
        return;
    }
    // Derive a concentration curve
    for (i = 0; i < count; i++) {
        d = ds[i];
        n[i] = H->dsd_n0 * exp(-H->dsd_lambda * d);
    }
    RS_set_dsd(H, n, ds, count, RSDropSizeDistributionMarshallPalmer);
    free(n);
}
void RS_set_dsd_to_mp(RSHandle *H) {
    // Convenience wrapper: Marshall-Palmer DSD sampled at 1-5 mm drop diameters.
    static const float diameters[] = {0.001f, 0.002f, 0.003f, 0.004f, 0.005f};
    const int bin_count = (int)(sizeof(diameters) / sizeof(diameters[0]));
    RS_set_dsd_to_mp_with_sizes(H, diameters, bin_count);
}
void RS_set_rcs_ellipsoid_table(RSHandle *H, const cl_float4 *weights, const float table_index_start, const float table_index_delta, unsigned int table_size) {
    // Upload the RCS-of-ellipsoid lookup table (cl_float4 per entry) to every worker
    // and record the FMA lookup coefficients (scale / origin / maximum) on each.
    int i;
    // Each entry is a cl_float4, i.e., 4 floats, hence table_size * 4 host floats
    RSTable table = RS_table_init(table_size * 4);
    if (table.data == NULL) {
        return;
    }
    // Set up the coefficients for FMA(a, b, c) in the CL kernel
    table.dx = 1.0f / table_index_delta;
    table.x0 = -table_index_start * table.dx;
    table.xm = (float)table_size - 1.0f;
    memcpy(table.data, weights, table_size * sizeof(cl_float4));
    if (H->verb > 1) {
        rsprint("Host RCS of ellipsoid table received. dx = %.4f x0 = %.1f xm = %.0f n = %d\n",
                table.dx, table.x0, table.xm, table_size);
    }
#if defined (_USE_GCL_)
    for (i = 0; i < H->num_workers; i++) {
        if (H->workers[i].rcs_ellipsoid != NULL) {
            if (H->verb > 1) {
                rsprint("workers[%d] setting RCS of ellipsoids.\n", i);
            }
            gcl_free(H->workers[i].rcs_ellipsoid);
        }
        H->workers[i].rcs_ellipsoid = gcl_malloc(table_size * sizeof(cl_float4), table.data, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR);
        if (H->workers[i].rcs_ellipsoid == NULL) {
            rsprint("ERROR: Unable to create RCS of ellipsoid table on CL device.\n");
            // Release the host-side table; it was leaked on this error path before
            RS_table_free(table);
            return;
        }
    }
#else
    cl_int ret;
    for (i = 0; i < H->num_workers; i++) {
        if (H->workers[i].rcs_ellipsoid != NULL) {
            if (H->verb > 1) {
                rsprint("workers[%d] setting RCS of ellipsoid.\n", i);
            }
            clReleaseMemObject(H->workers[i].rcs_ellipsoid);
        }
        if (H->verb > 2) {
            rsprint("workers[%d] creating RCS of ellipsoid (cl_mem) & copying data from %p.\n", i, table.data);
        }
        H->workers[i].rcs_ellipsoid = clCreateBuffer(H->workers[i].context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, table_size * sizeof(cl_float4), table.data, &ret);
        if (ret != CL_SUCCESS) {
            rsprint("ERROR: Unable to create RCS of ellipsoid table on CL device. ret = %d\n", ret);
            // Release the host-side table; it was leaked on this error path before
            RS_table_free(table);
            return;
        }
        if (H->verb > 2) {
            rsprint("workers[%d] created RCS of ellipsoid table @ %p.\n", i, H->workers[i].rcs_ellipsoid);
        }
    }
#endif
    for (i = 0; i < H->num_workers; i++) {
        // Copy over to CL workers. A bit wasteful but the codes are easier to ready this way.
        H->workers[i].rcs_ellipsoid_desc.s[RSTable1DDescriptionScale] = table.dx;
        H->workers[i].rcs_ellipsoid_desc.s[RSTable1DDescriptionOrigin] = table.x0;
        H->workers[i].rcs_ellipsoid_desc.s[RSTable1DDescriptionMaximum] = table.xm;
        H->workers[i].rcs_ellipsoid_desc.s[RSTable1DDescriptionUserConstant] = H->sim_desc.s[RSSimulationDescriptionDropConcentrationScale];
        if (!(H->sim_concept & RSSimulationConceptFixedScattererPosition) && H->workers[i].rcs_ellipsoid_desc.s[RSTable1DDescriptionUserConstant] == 0.0f) {
            rsprint("WARNING: Drop concentration scale not set.");
        }
        H->workers[i].mem_usage += (cl_uint)(table.xm + 1.0f) * sizeof(cl_float4);
    }
    RS_table_free(table);
}
void RS_set_range_weight(RSHandle *H, const float *weights, const float table_index_start, const float table_index_delta, unsigned int table_size) {
    // Upload the 1-D range weighting function to every worker and record the
    // FMA lookup coefficients (scale / origin / maximum) on each.
    int i;
    RSTable table = RS_table_init(table_size);
    if (table.data == NULL) {
        return;
    }
    // Set up the coefficients for FMA(a, b, c) in the CL kernel
    table.dx = 1.0f / table_index_delta;
    table.x0 = -table_index_start * table.dx;
    table.xm = (float)table_size - 1.0f;
    memcpy(table.data, weights, table_size * sizeof(float));
    if (H->verb > 1) {
        rsprint("Host range weight table received. dx = %.4f x0 = %.1f xm = %.0f n = %d\n",
                table.dx, table.x0, table.xm, table_size);
    }
#if defined (_USE_GCL_)
    for (i = 0; i < H->num_workers; i++) {
        if (H->workers[i].range_weight != NULL) {
            if (H->verb > 1) {
                rsprint("workers[%d] setting range weight.", i);
            }
            gcl_free(H->workers[i].range_weight);
        }
        H->workers[i].range_weight = gcl_malloc(table_size * sizeof(float), table.data, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR);
        if (H->workers[i].range_weight == NULL) {
            rsprint("ERROR: Unable to create range weight table on CL device.");
            // Release the host-side table; it was leaked on this error path before
            RS_table_free(table);
            return;
        }
    }
#else
    cl_int ret;
    for (i = 0; i < H->num_workers; i++) {
        if (H->workers[i].range_weight != NULL) {
            if (H->verb > 1) {
                rsprint("workers[%d] setting range weight.", i);
            }
            clReleaseMemObject(H->workers[i].range_weight);
        }
        if (H->verb > 2) {
            // The format string takes (%d, %p); the stray now() argument previously
            // shifted both conversions onto the wrong arguments (undefined behavior).
            rsprint("workers[%d] creating range weight (cl_mem) & copying data from %p.", i, table.data);
        }
        H->workers[i].range_weight = clCreateBuffer(H->workers[i].context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, table_size * sizeof(float), table.data, &ret);
        if (ret != CL_SUCCESS) {
            rsprint("ERROR: Unable to create range weight table on CL device.");
            // Release the host-side table; it was leaked on this error path before
            RS_table_free(table);
            return;
        }
        if (H->verb > 2) {
            rsprint("workers[%d] created range weight @ %p.", i, H->workers[i].range_weight);
        }
    }
#endif
    for (i = 0; i < H->num_workers; i++) {
        // Copy over to CL workers. A bit wasteful but the codes are easier to ready this way.
        H->workers[i].range_weight_desc.s[RSTable1DDescriptionScale] = table.dx;
        H->workers[i].range_weight_desc.s[RSTable1DDescriptionOrigin] = table.x0;
        H->workers[i].range_weight_desc.s[RSTable1DDescriptionMaximum] = table.xm;
        H->workers[i].mem_usage += (cl_uint)(table.xm + 1.0f) * sizeof(cl_float);
    }
    RS_table_free(table);
}
void RS_set_range_weight_to_triangle(RSHandle *H, float pulse_width_m) {
    // Triangular range weighting: unity at the cell center, zero at +/- one pulse width.
    static const float triangle[3] = {0.0f, 1.0f, 0.0f};
    RS_set_range_weight(H, triangle, -pulse_width_m, pulse_width_m, 3);
}
void RS_set_angular_weight(RSHandle *H, const float *weights, const float table_index_start, const float table_index_delta, unsigned int table_size) {
    // Upload the 1-D angular (antenna pattern) weighting function to every worker
    // and record the FMA lookup coefficients (scale / origin / maximum) on each.
    int i;
    RSTable table = RS_table_init(table_size);
    if (table.data == NULL) {
        rsprint("RS_set_angular_weight(): Unable to allocate memory.\n");
        return;
    }
    // Set up the coefficients for FMA(a, b, c) in the CL kernel
    table.dx = 1.0f / table_index_delta;
    table.x0 = -table_index_start * table.dx;
    table.xm = (float)table_size - 1.0f;
    memcpy(table.data, weights, table_size * sizeof(float));
    if (H->verb > 1) {
        rsprint("Host angular weight table received. dx = %.4f x0 = %.1f xm = %.0f n = %d",
                table.dx, table.x0, table.xm, table_size);
    }
#if defined (_USE_GCL_)
    for (i = 0; i < H->num_workers; i++) {
        if (H->workers[i].angular_weight != NULL) {
            if (H->verb > 1) {
                rsprint("workers[%d] setting angular weight.", i);
            }
            gcl_free(H->workers[i].angular_weight);
        }
        //for (int k = 0; k < table_size; k++) {
        //    printf("k=%d w = %.3f %.2f\n", k, table.data[k], 10 * log10f(table.data[k]));
        //}
        H->workers[i].angular_weight = gcl_malloc(table_size * sizeof(float), table.data, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR);
        if (H->workers[i].angular_weight == NULL) {
            rsprint("ERROR: Unable to create angular weight table on CL device.");
            // Release the host-side table; it was leaked on this error path before
            RS_table_free(table);
            return;
        }
    }
#else
    cl_int ret;
    for (i = 0; i < H->num_workers; i++) {
        if (H->workers[i].angular_weight != NULL) {
            if (H->verb > 1) {
                rsprint("workers[%d] setting angular weight.\n", i);
            }
            clReleaseMemObject(H->workers[i].angular_weight);
        }
        if (H->verb > 2) {
            rsprint("workers[%d] creating angular weight (cl_mem) & copying data from %p.", i, table.data);
        }
        H->workers[i].angular_weight = clCreateBuffer(H->workers[i].context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, table_size * sizeof(float), table.data, &ret);
        if (ret != CL_SUCCESS) {
            rsprint("ERROR: Unable to create angular weight table on CL device.");
            // Release the host-side table; it was leaked on this error path before
            RS_table_free(table);
            return;
        }
        if (H->verb > 2) {
            rsprint("workers[%d] created angular weight.", i);
        }
    }
#endif
    // Copy over to CL workers. A bit wasteful but the codes are easier to ready this way.
    for (i = 0; i < H->num_workers; i++) {
        H->workers[i].angular_weight_desc.s[RSTable1DDescriptionScale] = table.dx;
        H->workers[i].angular_weight_desc.s[RSTable1DDescriptionOrigin] = table.x0;
        H->workers[i].angular_weight_desc.s[RSTable1DDescriptionMaximum] = table.xm;
        H->workers[i].mem_usage += (cl_uint)(table.xm + 1.0f) * sizeof(cl_float);
    }
    RS_table_free(table);
}
void RS_set_angular_weight_2d(RSHandle *H,
                              const float *weights,
                              const float xs, const float xo, unsigned int xc,
                              const float ys, const float yo, unsigned int yc) {
    // Upload a 2-D angular (antenna pattern) weight table of xc x yc samples as a
    // texture image on every worker. xs/xo and ys/yo are the per-axis scale/origin
    // coefficients used by the CL kernels for the lookup.
    int i;
    RSTable2D table = RS_table2d_init(xc * yc);
    if (table.data == NULL) {
        // The 1-D variants guard this allocation; previously the 2-D variant
        // dereferenced NULL in the memcpy() below.
        rsprint("RS_set_angular_weight_2d(): Unable to allocate memory.\n");
        return;
    }
    table.xs = xs;
    table.xo = xo;
    table.xm = (float)(xc - 1);
    table.ys = ys;
    table.yo = yo;
    table.ym = (float)(yc - 1);
    memcpy(table.data, weights, xc * yc * sizeof(float));
    if (H->verb > 1) {
        rsprint("Host angular weight table 2d received\n");
        rsprint("dx = %.4f x0 = %.1f xm = %.1f n = %d\n", table.xs, table.xo, table.xm, xc);
        rsprint("dy = %.4f y0 = %.1f ym = %.1f n = %d\n", table.ys, table.yo, table.ym, yc);
    }
    // This is the part that we need to create a texture map for the RSTable2D table
    // NOTE(review): the image is described as cl_float4 texels (CL_RGBA) while the
    // host buffer is filled with xc * yc scalar floats — confirm RS_table2d_init()
    // allocates 4 floats per entry, otherwise CL_MEM_COPY_HOST_PTR over-reads it.
    cl_image_format format = {CL_RGBA, CL_FLOAT};
#if !defined (_USE_GCL_)
    // ret/flags are needed by both the CL 1.2 and pre-1.2 branches below; they
    // were previously declared only in the CL 1.2 branch, so the pre-1.2
    // clCreateImage2D() path did not compile.
    cl_int ret = CL_SUCCESS;
    cl_mem_flags flags = CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR;
#endif
#if defined (CL_VERSION_1_2)
    cl_image_desc desc;
    desc.image_type = CL_MEM_OBJECT_IMAGE2D;
    desc.image_width = xc;
    desc.image_height = yc;
    desc.image_depth = 1;
    desc.image_array_size = 0;
    desc.image_row_pitch = desc.image_width * sizeof(cl_float4);
    desc.image_slice_pitch = desc.image_height * desc.image_row_pitch;
    desc.num_mip_levels = 0;
    desc.num_samples = 0;
    desc.buffer = NULL;
#endif
    for (i = 0; i < H->num_workers; i++) {
        if (H->workers[i].angular_weight_2d != NULL) {
#if defined (_USE_GCL_)
            gcl_release_image(H->workers[i].angular_weight_2d);
#else
            clReleaseMemObject(H->workers[i].angular_weight_2d);
#endif
            // Subtract the footprint of the table being replaced (dimensions taken
            // from its descriptor: s8 = xm, s9 = ym)
            H->workers[i].mem_usage -= ((cl_uint)(H->workers[i].angular_weight_2d_desc.s8 + 1.0f) * (H->workers[i].angular_weight_2d_desc.s9 + 1.0f) * sizeof(cl_float4));
        }
#if defined (_USE_GCL_)
        H->workers[i].angular_weight_2d = gcl_create_image(&format, xc, yc, 1, H->workers[i].surf_angular_weight_2d);
#elif defined (CL_VERSION_1_2)
        H->workers[i].angular_weight_2d = clCreateImage(H->workers[i].context, flags, &format, &desc, table.data, &ret);
#else
        H->workers[i].angular_weight_2d = clCreateImage2D(H->workers[i].context, flags, &format, xc, yc, xc * sizeof(cl_float4), table.data, &ret);
#endif
        if (H->workers[i].angular_weight_2d == NULL) {
            rsprint("ERROR: workers[%d] unable to create angular_weight_2d tables on CL device(s).", i);
            // Release the host-side table; it was leaked on this error path before
            RS_table2d_free(table);
            return;
        } else if (H->verb > 2) {
            rsprint("workers[%d] created angular_weight_2d table @ %p", i, H->workers[i].angular_weight_2d);
        }
#if defined (_USE_GCL_)
        dispatch_async(H->workers[i].que, ^{
            size_t origin[3] = {0, 0, 0};
            size_t region[3] = {table.xc, table.yc, 1};
            gcl_copy_ptr_to_image(H->workers[i].angular_weight_2d, table.data, origin, region);
            dispatch_semaphore_signal(H->workers[i].sem);
        });
#endif
    }
    for (i = 0; i < H->num_workers; i++) {
#if defined (_USE_GCL_)
        dispatch_semaphore_wait(H->workers[i].sem, DISPATCH_TIME_FOREVER);
#endif
        // Copy over to CL worker
        H->workers[i].angular_weight_2d_desc.s[RSTable3DDescriptionScaleX] = table.xs;
        H->workers[i].angular_weight_2d_desc.s[RSTable3DDescriptionScaleY] = table.ys;
        H->workers[i].angular_weight_2d_desc.s[RSTable3DDescriptionScaleZ] = 0.0f;
        H->workers[i].angular_weight_2d_desc.s[RSTable3DDescriptionOriginX] = table.xo;
        H->workers[i].angular_weight_2d_desc.s[RSTable3DDescriptionOriginY] = table.yo;
        H->workers[i].angular_weight_2d_desc.s[RSTable3DDescriptionOriginZ] = 0.0f;
        H->workers[i].angular_weight_2d_desc.s[RSTable3DDescriptionMaximumX] = table.xm;
        H->workers[i].angular_weight_2d_desc.s[RSTable3DDescriptionMaximumY] = table.ym;
        H->workers[i].angular_weight_2d_desc.s[RSTable3DDescriptionMaximumZ] = 0.0f;
        H->workers[i].mem_usage += ((cl_uint)(table.xm + 1.0f) * (table.ym + 1.0f)) * sizeof(cl_float4);
    }
    RS_table2d_free(table);
}
void RS_set_angular_weight_to_double_cone(RSHandle *H, float beamwidth_rad) {
    // Coarse two-lobe pattern: unity on boresight, a 0.8 ring, zeros between and
    // beyond, sampled every two beamwidths.
    static const float pattern[] = {1.0f, 0.0f, 0.8f, 0.0f};
    const unsigned int sample_count = (unsigned int)(sizeof(pattern) / sizeof(pattern[0]));
    RS_set_angular_weight(H, pattern, 0.0f, 2.0f * beamwidth_rad, sample_count);
}
void RS_set_angular_weight_to_standard(RSHandle *H, float beamwidth_rad) {
    // Build a standard antenna pattern sampled every half degree off boresight
    // (forced to 1 on boresight and 0 at the last sample) and install it as the
    // 1-D angular weighting function.
    const unsigned int n = 32;
    float a;                                  // angle off boresight (rad)
    float b = 1.27f * M_PI / beamwidth_rad;   // argument scale tied to the beamwidth
    float c;
    float *w = (float *)malloc(n * sizeof(float));
    if (w == NULL) {
        // Previously dereferenced NULL on allocation failure
        rsprint("ERROR: Unable to allocate memory for angular weight table.");
        return;
    }
    float delta = 1.0f / 360.0f * M_PI;       // half-degree steps in radians
    // Unsigned index avoids the signed/unsigned comparison against n
    for (unsigned int i = 0; i < n; i++) {
        a = (float)i * delta;
        c = b * sinf(a);
        if (i == 0) {
            w[i] = 1.0f;
        } else if (i == (n - 1)) {
            w[i] = 0.0f;
        } else {
            w[i] = 8.0f * jn(2, c) / (c * c);
        }
        //printf("angle=%.4f deg w[%d] = %.4f dB\n", a / M_PI * 180.0f, i, 20.0f * log10f(w[i]));
    }
    RS_set_angular_weight(H, w, 0.0f, delta, n);
    free(w);
}
void RS_set_vel_data(RSHandle *H, const RSTable3D table) {
    // Upload one LES wind table (uvwt) and its companion table (cpxx) to every
    // worker as 3-D images (created lazily on first use, ping-pong pair per table),
    // then copy the grid-description coefficients for kernel-side lookups.
    int i;
    cl_int ret = -1;
    cl_mem_flags flags = CL_MEM_READ_ONLY;
    cl_image_format format = {CL_RGBA, CL_FLOAT};
    for (i = 0; i < H->num_workers; i++) {
        // Create the two ping-pong image pairs only once, on the first upload
        if (H->workers[i].les_uvwt[0] == NULL) {
#if defined (_USE_GCL_)
            // NOTE(review): 'surf_uwvt' on the first line vs 'surf_uvwt' on the next —
            // one of the two member spellings looks like a typo; confirm against the struct.
            H->workers[i].les_uvwt[0] = gcl_create_image(&format, table.x_, table.y_, table.z_, H->workers[i].surf_uwvt[0]);
            H->workers[i].les_uvwt[1] = gcl_create_image(&format, table.x_, table.y_, table.z_, H->workers[i].surf_uvwt[1]);
            H->workers[i].les_cpxx[0] = gcl_create_image(&format, table.x_, table.y_, table.z_, H->workers[i].surf_cpxx[0]);
            H->workers[i].les_cpxx[1] = gcl_create_image(&format, table.x_, table.y_, table.z_, H->workers[i].surf_cpxx[1]);
#elif defined (CL_VERSION_1_2)
            cl_image_desc desc;
            desc.image_type = CL_MEM_OBJECT_IMAGE3D;
            desc.image_width = table.x_;
            desc.image_height = table.y_;
            desc.image_depth = table.z_;
            desc.image_array_size = 0;
            desc.image_row_pitch = 0;
            desc.image_slice_pitch = 0;
            desc.num_mip_levels = 0;
            desc.num_samples = 0;
            desc.buffer = NULL;
            H->workers[i].les_uvwt[0] = clCreateImage(H->workers[i].context, flags, &format, &desc, NULL, &ret);
            H->workers[i].les_uvwt[1] = clCreateImage(H->workers[i].context, flags, &format, &desc, NULL, NULL);
            H->workers[i].les_cpxx[0] = clCreateImage(H->workers[i].context, flags, &format, &desc, NULL, NULL);
            H->workers[i].les_cpxx[1] = clCreateImage(H->workers[i].context, flags, &format, &desc, NULL, NULL);
#else
            H->workers[i].les_uvwt[0] = clCreateImage3D(H->workers[i].context, flags, &format, table.x_, table.y_, table.z_, 0, 0, NULL, &ret);
            H->workers[i].les_uvwt[1] = clCreateImage3D(H->workers[i].context, flags, &format, table.x_, table.y_, table.z_, 0, 0, NULL, NULL);
            H->workers[i].les_cpxx[0] = clCreateImage3D(H->workers[i].context, flags, &format, table.x_, table.y_, table.z_, 0, 0, NULL, NULL);
            H->workers[i].les_cpxx[1] = clCreateImage3D(H->workers[i].context, flags, &format, table.x_, table.y_, table.z_, 0, 0, NULL, NULL);
#endif
            if (H->workers[i].les_uvwt[0] == NULL || H->workers[i].les_uvwt[1] == NULL || H->workers[i].les_cpxx[0] == NULL || H->workers[i].les_cpxx[1] == NULL) {
                rsprint("ERROR: workers[%d] unable to create wind table on CL device. ret = %d table of %d x %d x %d @ %p (%d)\n", i, ret, table.x_, table.y_, table.z_, table.uvwt, flags);
                exit(EXIT_FAILURE);
            } else if (H->verb > 2) {
                rsprint("workers[%d] created wind table @ %p %p %p %p\n", i,
                        &H->workers[i].les_uvwt[0], &H->workers[i].les_uvwt[1],
                        &H->workers[i].les_cpxx[0], &H->workers[i].les_cpxx[1]);
            }
        } // if (H->workers[i].les_uvwt[0] == NULL) ...
        // Asynchronously upload into the current ping-pong slot (les_id)
#if defined (_USE_GCL_)
        dispatch_async(H->workers[i].que, ^{
            size_t origin[3] = {0, 0, 0};
            size_t region[3] = {table.x_, table.y_, table.z_};
            // NOTE(review): both images are filled from table.data here, while the
            // CL path below uploads table.uvwt and table.cpxx separately — confirm
            // which source buffers the GCL path should use.
            gcl_copy_ptr_to_image(H->workers[i].les_uvwt[H->workers[i].les_id], table.data, origin, region);
            gcl_copy_ptr_to_image(H->workers[i].les_cpxx[H->workers[i].les_id], table.data, origin, region);
            dispatch_semaphore_signal(H->workers[i].sem_upload);
        });
#else
        size_t origin[3] = {0, 0, 0};
        size_t region[3] = {table.x_, table.y_, table.z_};
        // NOTE(review): both non-blocking writes share event_upload, so the wait
        // below only covers the second enqueue; verify the first write is also
        // guaranteed complete (e.g. by in-order queue semantics) before reuse.
        clEnqueueWriteImage(H->workers[i].que, H->workers[i].les_uvwt[H->workers[i].les_id], CL_FALSE, origin, region,
                            table.x_ * sizeof(cl_float4), table.y_ * table.x_ * sizeof(cl_float4), table.uvwt, 0, NULL, &H->workers[i].event_upload);
        clEnqueueWriteImage(H->workers[i].que, H->workers[i].les_cpxx[H->workers[i].les_id], CL_FALSE, origin, region,
                            table.x_ * sizeof(cl_float4), table.y_ * table.x_ * sizeof(cl_float4), table.cpxx, 0, NULL, &H->workers[i].event_upload);
#endif
    }
    for (i = 0; i < H->num_workers; i++) {
#if defined (_USE_GCL_)
        dispatch_semaphore_wait(H->workers[i].sem_upload, DISPATCH_TIME_FOREVER);
#else
        clWaitForEvents(1, &H->workers[i].event_upload);
#endif
        // Copy over to CL worker
        // Smuggle the integer spacing flags through a float slot bit-for-bit
        float tmpf; memcpy(&tmpf, &table.spacing, sizeof(float));
        H->workers[i].les_desc.s[RSTable3DStaggeredDescriptionFormat] = tmpf; // Make a copy in float so we are maintaining all 32-bits
        //printf("%s : RS : %d / %.9f\n", now(), table.spacing, H->workers[i].vel_desc.s[RSTable3DStaggeredDescriptionFormat]);
        // Per axis: stretched grids store (base-change, position-scale, offset)
        // coefficients; regular grids store (scale, origin, maximum).
        if (table.spacing & RSTableSpacingStretchedX) {
            H->workers[i].les_desc.s[RSTable3DStaggeredDescriptionBaseChangeX] = table.xs; // "m" for stretched grid: m * log1p(n * pos.x) + o;
            H->workers[i].les_desc.s[RSTable3DStaggeredDescriptionPositionScaleX] = table.xo; // "n" for stretched grid: m * log1p(n * pos.x) + o;
            H->workers[i].les_desc.s[RSTable3DStaggeredDescriptionOffsetX] = table.xm; // "o" for stretched grid: m * log1p(n * pos.x) + o;
        } else {
            H->workers[i].les_desc.s[RSTable3DDescriptionScaleX] = table.xs;
            H->workers[i].les_desc.s[RSTable3DDescriptionOriginX] = table.xo;
            H->workers[i].les_desc.s[RSTable3DDescriptionMaximumX] = table.xm;
        }
        if (table.spacing & RSTableSpacingStretchedY) {
            H->workers[i].les_desc.s[RSTable3DStaggeredDescriptionBaseChangeY] = table.ys;
            H->workers[i].les_desc.s[RSTable3DStaggeredDescriptionPositionScaleY] = table.yo;
            H->workers[i].les_desc.s[RSTable3DStaggeredDescriptionOffsetY] = table.ym;
        } else {
            H->workers[i].les_desc.s[RSTable3DDescriptionScaleY] = table.ys;
            H->workers[i].les_desc.s[RSTable3DDescriptionOriginY] = table.yo;
            H->workers[i].les_desc.s[RSTable3DDescriptionMaximumY] = table.ym;
        }
        if (table.spacing & RSTableSpacingStretchedZ) {
            H->workers[i].les_desc.s[RSTable3DStaggeredDescriptionBaseChangeZ] = table.zs;
            H->workers[i].les_desc.s[RSTable3DStaggeredDescriptionPositionScaleZ] = table.zo;
            H->workers[i].les_desc.s[RSTable3DStaggeredDescriptionOffsetZ] = table.zm;
        } else {
            H->workers[i].les_desc.s[RSTable3DDescriptionScaleZ] = table.zs;
            H->workers[i].les_desc.s[RSTable3DDescriptionOriginZ] = table.zo;
            H->workers[i].les_desc.s[RSTable3DDescriptionMaximumZ] = table.zm;
        }
        H->workers[i].les_desc.s[RSTable3DDescriptionRefreshTime] = table.tr;
    }
}
void RS_set_vel_data_to_config(RSHandle *H, LESConfig c) {
    // Switch the simulation to a new LES (wind) configuration: release the previous
    // LES handle and the per-worker wind textures, then load frame 0 of the new set.
    int i;
    if (H->L != NULL) {
        LES_free(H->L);
    }
    if (H->verb) {
        rsprint("Using LES configuration '" UNDERLINE("%s") "' ...", (char *)c);
    }
    H->L = LES_init_with_config_path(c, NULL);
#if defined(GUI)
    LES_set_delayed_read(H->L);
#endif
    // Release the pre-existing memory. Always assume the new LESConfig is not the same size.
    for (i = 0; i < H->num_workers; i++) {
        // BUG FIX: the ping-pong slot index is always 0/1; the original tested
        // les_uvwt[i], which is wrong for worker 1 and out of bounds for workers >= 2.
        if (H->workers[i].les_uvwt[0] != NULL) {
#if defined (_USE_GCL_)
            gcl_release_image(H->workers[i].les_uvwt[0]);
            gcl_release_image(H->workers[i].les_uvwt[1]);
            gcl_release_image(H->workers[i].les_cpxx[0]);
            gcl_release_image(H->workers[i].les_cpxx[1]);
#else
            clReleaseMemObject(H->workers[i].les_uvwt[0]);
            clReleaseMemObject(H->workers[i].les_uvwt[1]);
            clReleaseMemObject(H->workers[i].les_cpxx[0]);
            clReleaseMemObject(H->workers[i].les_cpxx[1]);
#endif
            // NULL all four so RS_set_vel_data() re-creates the images at the new
            // size (it tests les_uvwt[0] == NULL). les_cpxx is allocated together
            // with les_uvwt there and previously leaked on reconfiguration.
            H->workers[i].les_uvwt[0] = NULL;
            H->workers[i].les_uvwt[1] = NULL;
            H->workers[i].les_cpxx[0] = NULL;
            H->workers[i].les_cpxx[1] = NULL;
        }
    }
    // Reset the velocity index to 0.
    // The GPU handles are still kept intact, will be released upon framework completion / table replacement
    H->vel_idx = 0;
    H->vel_count = (uint32_t)LES_get_table_count(H->L);
    if (H->verb) {
        rsprint("Reading LES table (%u out of %u)...", H->vel_idx, H->vel_count);
    }
    RS_set_vel_data_to_LES_table(H, LES_get_frame(H->L, 0));
    H->vel_idx = 1;
}
// Stage one LESTable frame into an RSTable3D and upload it to the GPU through
// RS_set_vel_data(). Supports both uniform grids and stretched grids with
// geometric spacing dz(k) = a * r^k; in the stretched case the descriptor
// carries the coefficients of the inverse map
//     k = 1 / log(r) * log1p((r - 1) / a * z)
// so the CL kernel can turn a position into a (fractional) cell index.
// The bulk uvwt/cpxx arrays are borrowed from `leslie` for the upload and the
// staging table's own buffers are restored before freeing. Only the frame
// metadata is cached in H->vel_desc (its data pointers are zeroed).
void RS_set_vel_data_to_LES_table(RSHandle *H, const LESTable *leslie) {
    float hmax, zmax;
    RSTable3D table = RS_table3d_init(leslie->nn);
    // NOTE: despite the message wording, this guards against a failed
    // allocation of the local staging table, not a NULL input.
    if (table.uvwt == NULL) {
        rsprint("ERROR: LES input data cannot be NULL.");
        return;
    }
    if (leslie->is_stretched) {
        //
        // For LES tables with streched grid:
        //
        //    dz(k) = a * r ^ k
        //
        //    z(k) = a * ( 1 - r ^ k ) / ( 1 - r )
        //
        //    k = 1 / log ( r ) * log ( 1 - ( 1 - r ) / a * z [ k ] )
        //      = 1 / log ( r ) * log ( 1 + ( r - 1 ) / a * z [ k ] )
        //
        // For a = 2.7, r = 1.05, these values may be documented in the data files at some point
        //
        //    k = 20.495934314287851 * log1p ( 0.018518518518519 * z [ k ] )
        //
        // For a = 2.0, r = 1.0212
        //
        //    k = 47.6681 * log1p ( 0.0160000000 * z [ k ] )
        //
        table.spacing = RSTableSpacingStretchedX | RSTableSpacingStretchedY | RSTableSpacingStretchedZ;
        // xs/ys/zs = 1 / log(r) and xo/yo/zo = (r - 1) / a feed the log1p
        // form above. xm/ym hold the mid-grid index (x / y are symmetric
        // about the domain center) while zm is 0 (z starts at the ground) —
        // presumably the kernel treats stretched axes accordingly; confirm
        // against the CL kernel's use of the staggered description slots.
        table.x_ = leslie->nx;    table.xm = 0.5f * (float)(leslie->nx - 1);    table.xs = 1.0f / log(leslie->rx);    table.xo = (leslie->rx - 1.0f) / leslie->ax;
        table.y_ = leslie->ny;    table.ym = 0.5f * (float)(leslie->ny - 1);    table.ys = 1.0f / log(leslie->ry);    table.yo = (leslie->ry - 1.0f) / leslie->ay;
        table.z_ = leslie->nz;    table.zm = 0.0f;                              table.zs = 1.0f / log(leslie->rz);    table.zo = (leslie->rz - 1.0f) / leslie->az;
        // Geometric-series sums: half-extent in x (up to the mid index) and
        // the full height of the z column.
        hmax = leslie->ax * (1.0f - powf(leslie->rx, table.xm)) / (1.0f - leslie->rx);
        zmax = leslie->az * (1.0f - powf(leslie->rz, (float)(leslie->nz - 1))) / (1.0f - leslie->rz);
        if (H->verb > 0 && H->vel_idx == 0) {
            rsprint("LES stretched x-grid using %.6f * log1p( %.6f * x )   Mid = %.2f m\n",
                    table.xs, table.xo, hmax);
            rsprint("LES stretched z-grid using %.6f * log1p( %.6f * z )   Max = %.2f m\n",
                    table.zs, table.zo, zmax);
            rsprint("GPU LES[%2d/%2d] (%d, %s MB)\n",
                    H->vel_idx, H->vel_count,
                    H->workers[0].les_id,
                    commaint(leslie->nn * sizeof(cl_float4) / 1024 / 1024));
        }
    } else {
        // Uniform grid: scale is the reciprocal cell size and the x / y
        // origins center the table on the domain; z starts at the ground.
        table.x_ = leslie->nx;    table.xm = (float)leslie->nx - 1.0f;   table.xs = 1.0f / leslie->rx;   table.xo = (float)(leslie->nx - 1) * 0.5f;
        table.y_ = leslie->ny;    table.ym = (float)leslie->ny - 1.0f;   table.ys = 1.0f / leslie->ry;   table.yo = (float)(leslie->ny - 1) * 0.5f;
        table.z_ = leslie->nz;    table.zm = (float)leslie->nz - 1.0f;   table.zs = 1.0f / leslie->rz;   table.zo = 0.0f;
        hmax = 0.5f * ((float)leslie->nx - 1.0) * leslie->rx;
        zmax = ((float)leslie->nz - 1.0) * leslie->rz;
        if (H->verb > 0 && H->vel_idx == 0) {
            rsprint("LES uniform grid spacing using %.2f, %.2f, %.2f m\n", leslie->rx, leslie->ry, leslie->rz);
            rsprint("GPU LES[%2d/%2d] (%d, %s MB)\n",
                    H->vel_idx, H->vel_count,
                    H->workers[0].les_id,
                    commaint(leslie->nn * sizeof(cl_float4) / 1024 / 1024));
        }
    }
    // Echo the physical extent of the table (x / y are symmetric about 0).
    if (H->verb > 0 && H->vel_idx == 0) {
        printf(RS_INDENT "o X:[ %.2f - %.2f ] (%.2f) m\n"
               RS_INDENT "o Y:[ %.2f - %.2f ] (%.2f) m\n"
               RS_INDENT "o Z:[ %.2f - %.2f ] (%.2f) m\n",
               -hmax, hmax, 2.0f * hmax,
               -hmax, hmax, 2.0f * hmax,
               0.0, zmax, zmax);
    }
    // Some other parameters
    table.tr = leslie->tr;
    // There is a toll-free bridge: LESTable has a remapped data structure during background read so there is no need to copy, just reassign the pointer, gotta love C!
    void *uvwt_orig = table.uvwt;
    void *cpxx_orig = table.cpxx;
    table.uvwt = (cl_float4 *)leslie->uvwt;
    table.cpxx = (cl_float4 *)leslie->cpxx;
    if (H->sim_concept & RSSimulationConceptDebrisFluxFromVelocity) {
        //RS_set_debris_flux_field_to_center_cell_of_3x3(H);
        //RS_set_debris_flux_field_to_checker_board(H, 51);
        //RS_set_debris_flux_field_to_checker_board_stretched(H, leslie);
        RS_set_debris_flux_field_from_LES(H, leslie);
    }
    // Now we call the function to upload to GPU memory
    RS_set_vel_data(H, table);
    // Restore the pointer so that it can be freed as expected.
    table.uvwt = uvwt_orig;
    table.cpxx = cpxx_orig;
    // Cache a copy of the parameters but not the data, the data could be deallocated immediately after this function call.
    H->vel_desc = *leslie;
    memset(&H->vel_desc.data, 0, sizeof(LESValue));
    RS_table3d_free(table);
}
// Upload a single-cell velocity table so the entire domain advects with one
// constant wind vector. The 1 x 1 x 1 table maps every position to the same
// cell; the w component of the stored float4 is zeroed.
void RS_set_vel_data_to_uniform(RSHandle *H, cl_float4 velocity) {
    RSTable3D table = RS_table3d_init(1);
    RSVolume domain = RS_get_domain(H);
    if (H->verb > 1) {
        rsprint("Uniform @ X:[ %.2f - %.2f ] Y:[ %.2f - %.2f ] Z:[ %.2f - %.2f ]",
                domain.origin.x, domain.origin.x + domain.size.x,
                domain.origin.y, domain.origin.y + domain.size.y,
                domain.origin.z, domain.origin.z + domain.size.z);
    }
    // Degenerate mapping: one cell spans the entire domain along each axis.
    table.x_ = 1;
    table.xm = 0.0f;
    table.xs = 1.0f / domain.size.x;
    table.xo = 0.0f;
    table.y_ = 1;
    table.ym = 0.0f;
    table.ys = 1.0f / domain.size.y;
    table.yo = 0.0f;
    table.z_ = 1;
    table.zm = 0.0f;
    table.zs = 1.0f / domain.size.z;
    table.zo = 0.0f;
    // Refresh time, matching the other synthetic tables.
    table.tr = 1000.0f;
    // The lone cell carries the requested wind vector.
    table.uvwt[0].x = velocity.x;
    table.uvwt[0].y = velocity.y;
    table.uvwt[0].z = velocity.z;
    table.uvwt[0].w = 0.0f;
    RS_set_vel_data(H, table);
    RS_table3d_free(table);
}
// Populate a synthetic 3 x 3 x 3 velocity table for testing: each cell's
// component speeds come from {-v, 0, +v} so the flow diverges away from the
// center cell.
void RS_set_vel_data_to_cube27(RSHandle *H) {
    RSTable3D table = RS_table3d_init(27);
    RSVolume domain = RS_get_domain(H);
    if (H->verb > 1) {
        rsprint("Cube27 @ X:[ %.2f - %.2f ] Y:[ %.2f - %.2f ] Z:[ %.2f - %.2f ]",
                domain.origin.x, domain.origin.x + domain.size.x,
                domain.origin.y, domain.origin.y + domain.size.y,
                domain.origin.z, domain.origin.z + domain.size.z);
    }
    // Mapping coefficients: three cells per axis across the domain. The x / y
    // offsets center the table while z starts at the domain bottom.
    table.x_ = 3; table.xm = 2.0f; table.xs = 3.0f / domain.size.x; table.xo = 1.0f;
    table.y_ = 3; table.ym = 2.0f; table.ys = 3.0f / domain.size.y; table.yo = 1.0f;
    table.z_ = 3; table.zm = 2.0f; table.zs = 3.0f / domain.size.z; table.zo = 0.0f;
    table.tr = 1000.0f;
    const float v = 10.0f;
    // Walk the cube in row-major order (x index varies fastest).
    int n = 0;
    for (int iz = 0; iz < 3; iz++) {
        for (int iy = 0; iy < 3; iy++) {
            for (int ix = 0; ix < 3; ix++) {
                table.uvwt[n].x = (float)ix * v - v;
                table.uvwt[n].y = (float)iy * v - v;
                table.uvwt[n].z = (float)iz * v - v;
                table.uvwt[n].w = 0.0f;
                n++;
            }
        }
    }
    RS_set_vel_data(H, table);
    RS_table3d_free(table);
}
// Populate a synthetic 5 x 5 x 5 velocity table for testing: each cell's
// component speeds come from {-2v, -v, 0, +v, +2v} about the center cell.
void RS_set_vel_data_to_cube125(RSHandle *H) {
    RSTable3D table = RS_table3d_init(125);
    RSVolume domain = RS_get_domain(H);
    if (H->verb > 1) {
        rsprint("Cube125 @ X:[ %.2f - %.2f ] Y:[ %.2f - %.2f ] Z:[ %.2f - %.2f ]",
                domain.origin.x, domain.origin.x + domain.size.x,
                domain.origin.y, domain.origin.y + domain.size.y,
                domain.origin.z, domain.origin.z + domain.size.z);
    }
    // Mapping coefficients: five cells per axis; offsets anchor the table to
    // the domain origin in all three dimensions.
    table.x_ = 5; table.xm = 4.0f; table.xs = 5.0f / domain.size.x; table.xo = -domain.origin.x * table.xs;
    table.y_ = 5; table.ym = 4.0f; table.ys = 5.0f / domain.size.y; table.yo = -domain.origin.y * table.ys;
    table.z_ = 5; table.zm = 4.0f; table.zs = 5.0f / domain.size.z; table.zo = -domain.origin.z * table.zs;
    table.tr = 1000.0f;
    const float v = 0.5f;
    // Walk the cube in row-major order (x index varies fastest).
    int n = 0;
    for (int iz = 0; iz < 5; iz++) {
        for (int iy = 0; iy < 5; iy++) {
            for (int ix = 0; ix < 5; ix++) {
                table.uvwt[n].x = (float)ix * v - 2.0f * v;
                table.uvwt[n].y = (float)iy * v - 2.0f * v;
                table.uvwt[n].z = (float)iz * v - 2.0f * v;
                table.uvwt[n].w = 0.0f;
                n++;
            }
        }
    }
    RS_set_vel_data(H, table);
    RS_table3d_free(table);
}
// Deduct the resident LES velocity table from each worker's memory tally.
// The GPU buffers themselves are left alone; they are reclaimed when a new
// table arrives or when the framework completes.
void RS_clear_vel_data(RSHandle *H) {
    for (int w = 0; w < H->num_workers; w++) {
        const cl_uint dim_x = (cl_uint)H->workers[w].les_desc.s[RSTable3DDescriptionMaximumX] + 1;
        const cl_uint dim_y = (cl_uint)H->workers[w].les_desc.s[RSTable3DDescriptionMaximumY] + 1;
        const cl_uint dim_z = (cl_uint)H->workers[w].les_desc.s[RSTable3DDescriptionMaximumZ] + 1;
        H->workers[w].mem_usage -= dim_x * dim_y * dim_z * sizeof(cl_float4);
    }
}
// Convert a discrete PDF over the flux map cells into an inverse-CDF lookup
// table of n = 2048 entries and hand it to RS_set_debris_flux_field_by_icdf().
// The PDF must sum to (approximately) one; a small rounding overshoot in the
// second-to-last bin is clamped.
//
// @param H    simulation handle
// @param map  2-D mapping descriptor; on entry map->x_ is the PDF length, on
//             exit it is rewritten to the iCDF table length (n)
// @param pdf  map->x_ probabilities, one per cell
void RS_set_debris_flux_field_by_pdf(RSHandle *H, RSTable2D *map, const float *pdf) {
    int k;
    // Some constants
    const int n = 2048;                   // resolution of the iCDF lookup table
    const int pdf_count = map->x_;
    const int cdf_count = pdf_count + 1;  // the CDF carries one extra knot (the 1.0 cap)
    double *cdf = (double *)malloc(cdf_count * sizeof(double));
    float *tab = (float *)malloc(n * sizeof(float));
    if (cdf == NULL || tab == NULL) {
        rsprint("ERROR: Unable to allocate CDF scratch buffers.");
        free(cdf);
        free(tab);
        return;
    }
    int b, e;
    float vl, vh, a, x;
    // Accumulate the CDF; cdf[k] holds the probability mass below cell k.
    float cumsum = 0.0f;
    for (k = 0; k < pdf_count; k++) {
        cdf[k] = cumsum;
        cumsum += pdf[k];
        if (cumsum > 1.0f) {
            if (k < pdf_count - 2) {
                rsprint("Error. Bad PDF was supplied, cumsum = %.8f > 1.0 @ k = %d / %d\n", cumsum, k, pdf_count);
                free(cdf);
                free(tab);
                return;
            } else if (k < pdf_count - 1) {
                // Tolerate rounding overshoot near the tail.
                rsprint("Warning. PDF value[%d/%d] = %.8f clamped to 1.0\n", k, pdf_count, cumsum);
                cumsum = 1.0f;
            }
        }
    }
    cdf[k] = 1.0f;
    // Derive the CDF inverse lookup table
    for (k = 0; k < n; k++) {
        x = (float)k / (n - 1);
        b = 0;
        // BUGFIX: check the bound before dereferencing so cdf[cdf_count] is
        // never read when x reaches the final knot (x == 1.0).
        while (b < cdf_count && cdf[b] <= x) {
            b++;
        }
        b = MAX(b - 1, 0);
        e = MIN(b + 1, pdf_count);
        if (cdf[b] == cdf[e]) {
            // Flat (zero-probability) stretch of the CDF: roll back to its
            // left edge so the interpolation below is well defined.
#if defined(DEBUG_CDF)
            printf("roll back (b, e) = (%d, %d)   x = %.2f   cdf[b] = %.2f -> %s\n", b, e, x, cdf[b], cdf[b] >= x ? "Y" : "N");
#endif
            while (cdf[b] >= x && b > 0) {
                b--;
            }
            e = MIN(b + 1, pdf_count);
#if defined(DEBUG_CDF)
            printf(" --> (b, e) = (%d, %d)\n", b, e);
#endif
        }
        // Gather the two points for linear interpolation
        vl = (float)b;
        vh = (float)e;
        if (b == e) {
            tab[k] = vl;
        } else {
            a = (x - cdf[b]) / (cdf[e] - cdf[b]);
            if (cdf[b] <= x && x <= cdf[e]) {
                tab[k] = vl + a * (vh - vl);
            } else {
                // BUGFIX: this error path used to leak cdf and tab.
                rsprint("ERROR. Unable to continue. I need upgrades. Tell my father. (b, e) = (%d, %d)  x = %.2f\n", b, e, x);
                free(cdf);
                free(tab);
                return;
            }
        }
        //printf("k = %3d  (b, e) = (%d, %d)   x = [%.2f, (%.2f), %.2f]   v = [%.2f, (%.2f), %.2f]\n", k, b, e, cdf[b], x, cdf[e], vl, tab[k], vh);
    }
    // Replace the count of table elements with the iCDF element count
    map->x_ = n;
    RS_set_debris_flux_field_by_icdf(H, map, tab);
    free(cdf);
    free(tab);
}
// Upload an inverse-CDF debris flux lookup table (count = map->x_ entries) to
// every worker. On first use both ping-pong CL buffers are created with the
// table contents; afterwards the data is streamed into the existing buffer.
// The map's scale/origin/maximum coefficients are copied into dff_desc.
void RS_set_debris_flux_field_by_icdf(RSHandle *H, RSTable2D *map, const float *icdf) {
    int i;
    cl_int ret;
    const int count = (int)(map->x_);
    RSTable table = RS_table_init(count);
    if (table.data == NULL) {
        rsprint("RS_set_debris_flux_field(): Unable to allocate memory.\n");
        return;
    }
    memcpy(table.data, icdf, count * sizeof(float));
    for (i = 0; i < H->num_workers; i++) {
        if (H->workers[i].dff_icdf[0] == NULL) {
#if defined (_USE_GCL_)
            rsprint("Error. This portion still needs to be implemented (A). i = %d\n", i);
#else
            H->workers[i].dff_icdf[0] = clCreateBuffer(H->workers[i].context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, count * sizeof(float), table.data, &ret);
            H->workers[i].dff_icdf[1] = clCreateBuffer(H->workers[i].context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, count * sizeof(float), table.data, NULL);
            H->workers[i].mem_usage += count * sizeof(cl_float);
#endif
            if (H->workers[i].dff_icdf[0] == NULL || H->workers[i].dff_icdf[1] == NULL || ret != CL_SUCCESS) {
                rsprint("ERROR: workers[%d] unable to create debris flux field table on CL device.  ret = %d   table of %d @ %p\n", i, ret, count, table.data);
                exit(EXIT_FAILURE);
            } else if (H->verb > 2) {
                rsprint("workers[%d] created debris flux field @ %p %p.", i, H->workers[i].dff_icdf[0], H->workers[i].dff_icdf[1]);
            }
        } else {
#if defined (_USE_GCL_)
            rsprint("Error. This portion still needs to be implemented (B). i = %d\n", i);
#else
            // NOTE(review): both writes target the same (current les_id)
            // buffer with identical data; presumably the second was meant to
            // refresh the other ping-pong slot — confirm against the kernels
            // before changing.
            clEnqueueWriteBuffer(H->workers[i].que, H->workers[i].dff_icdf[H->workers[i].les_id], CL_FALSE, 0, count * sizeof(cl_float), table.data, 0, NULL, &H->workers[i].event_upload);
            clEnqueueWriteBuffer(H->workers[i].que, H->workers[i].dff_icdf[H->workers[i].les_id], CL_FALSE, 0, count * sizeof(cl_float), table.data, 0, NULL, &H->workers[i].event_upload);
#endif
        } // if (H->workers[i].dff_icdf[0] == NULL) ...
    }
    for (i = 0; i < H->num_workers; i++) {
#if defined (_USE_GCL_)
        // BUGFIX: this statement was missing its terminating semicolon, which
        // broke the _USE_GCL_ build.
        rsprint("Error. This portion still needs to be implemented (C).\n");
#else
        clWaitForEvents(1, &H->workers[i].event_upload);
#endif
        // Copy the mapping coefficients into the worker's flux descriptor.
        H->workers[i].dff_desc.s[RSTableDescriptionScaleX] = map->xs;               // s0
        H->workers[i].dff_desc.s[RSTableDescriptionScaleY] = map->ys;               // s1
        H->workers[i].dff_desc.s[RSTableDescriptionScaleZ] = -1.0f;                 // s2
        H->workers[i].dff_desc.s[RSTableDescriptionOriginX] = map->xo;              // s4
        H->workers[i].dff_desc.s[RSTableDescriptionOriginY] = map->yo;              // s5
        H->workers[i].dff_desc.s[RSTableDescriptionOriginZ] = -1.0f;                // s6
        H->workers[i].dff_desc.s[RSTableDescriptionMaximumX] = map->xm;             // s8
        H->workers[i].dff_desc.s[RSTableDescriptionMaximumY] = map->ym;             // s9
        H->workers[i].dff_desc.s[RSTableDescriptionMaximumZ] = -1.0f;               // sa
        H->workers[i].dff_desc.s[RSTableDescriptionReserved4] = 0.0f;               // sc
        H->workers[i].dff_desc.s[RSTableDescriptionReserved5] = map->xm + 1.0f;     // sd
        H->workers[i].dff_desc.s[RSTableDescriptionReserved6] = (float)map->y_;     // se
        H->workers[i].dff_desc.s[RSTableDescriptionReserved7] = (float)map->x_ - 1.0f; // sf
    }
    RS_table_free(table);
}
// Debris flux test pattern: all probability mass in the middle cell (index 4)
// of a 3 x 3 grid. The inverse CDF needs only two knots — v = 0 maps to cell
// 4 and v = 1 maps to cell 5 (the exclusive upper edge) — so every draw
// interpolates within cell 4.
void RS_set_debris_flux_field_to_center_cell_of_3x3(RSHandle *H) {
    float icdf[] = {4.0f, 5.0f};
    RSTable2D map = {
        .x_ = sizeof(icdf) / sizeof(icdf[0]),   // number of iCDF knots
        .xm = 2.0f,                             // maximum cell index along x
        .ym = 2.0f,                             // maximum cell index along y
        .xs = 1.0 / 3.0f,                       // three cells per unit span
        .ys = 1.0 / 3.0f,
        .xo = -1.0f,                            // center the grid on the origin
        .yo = -1.0f
    };
    RS_set_debris_flux_field_by_icdf(H, &map, icdf);
}
// Debris flux test pattern: a c x c checker board where every even-indexed
// cell carries equal probability and the odd cells carry none.
void RS_set_debris_flux_field_to_checker_board(RSHandle *H, const int c) {
    int j;
    const int pdf_count = c * c;
    float *pdf = (float *)malloc(pdf_count * sizeof(float));
    // Un-normalized weights: 100 on even-indexed cells, 0 elsewhere.
    for (j = 0; j < pdf_count; j++) {
        pdf[j] = (j % 2 == 0) ? 100.0f : 0.0f;
    }
    // Normalize so the weights form a proper PDF.
    float total = 0.0;
    for (j = 0; j < pdf_count; j++) {
        total += pdf[j];
    }
    for (j = 0; j < pdf_count; j++) {
        pdf[j] /= total;
    }
    // Mapping convention for the GPU-side descriptor.
    RSTable2D map = {
        .x_ = c * c,            // Total number of cells
        .y_ = 2,                // Table convention: 0 - uniform grid, 1 - stretched_grid, >= 2 - test modes
        .xs = 1.0f / (float)c,  // Scale of X
        .ys = 1.0f / (float)c,  // Scale of Y
        .xo = -1.0f,            // Offset of X
        .yo = -1.0f,            // Offset of Y
        .xm = (float)(c - 1),   // Maximum cell index of X (for CL kernel)
        .ym = (float)(c - 1)    // Maximum cell index of Y (for CL kernel)
    };
    RS_set_debris_flux_field_by_pdf(H, &map, pdf);
    free(pdf);
}
// Debris flux test pattern for stretched LES grids: a 9 x 9 checker board
// whose even-indexed cells are weighted by the local cell footprint dx * dy,
// so the probability grows toward the (geometrically expanding) domain edges.
// The map also passes the stretched-grid coefficients the CL kernel needs to
// evaluate z(k) = a * (1 - r^k) / (1 - r).
void RS_set_debris_flux_field_to_checker_board_stretched(RSHandle *H, const LESTable *leslie) {
    int k;
    int ix, iy;
    float dx, dy;
    const int c = 9;
    // Scale the r constant so that the rate increases quicker since we use a small c
    const float q = 1.46f;
    // Some constants for the derived CDF
    const int pdf_count = c * c;
    // Mid-grid index; cell sizes grow geometrically away from this center.
    const float m = 0.5f * (float)(c - 1);
    // A local storage for the pdf function
    float *pdf = (float *)malloc(pdf_count * sizeof(float));
    // Psuedo-PDF, not yet normalized: only even-indexed cells are populated.
    memset(pdf, 0, pdf_count * sizeof(float));
    for (k = 0; k < pdf_count; k += 2) {
        if (leslie->is_stretched) {
            iy = k / c;
            ix = k % c;
            // d(k) = a * r ^ k
            dx = leslie->ax * powf(q * leslie->rx, fabs((float)ix - m));
            dy = leslie->ay * powf(q * leslie->ry, fabs((float)iy - m));
            pdf[k] = dx * dy;
        } else {
            pdf[k] = 1.0;
        }
    }
    // Actual PDF after normalization
    float sum = 0.0;
    for (k = 0; k < pdf_count; k++) {
        sum += pdf[k];
    }
    for (k = 0; k < pdf_count; k++) {
        pdf[k] /= sum;
    }
    // y_ = 1 flags a stretched-grid table per the convention noted in
    // RS_set_debris_flux_field_to_checker_board(). NOTE(review): it is set to
    // 1 even when leslie is a uniform grid — confirm that is intended.
    RSTable2D map = {
        .x_ = pdf_count,
        .y_ = 1
    };
    if (leslie->is_stretched) {
        //
        // z(k) = a * (1.0 - r ^ k) / (1.0 - r)
        //      = (1.0 - pow(r, k)) * a / (1.0 - r)
        //      = pow(r, k) * -a / (1.0 - r) + a / (1.0 - r)
        //      = fma(pow(r, k), -a / (1.0 - r), a / (1.0 - r))
        //
        // Note that k is assumed to have value of 0.0 at the middle of the domain
        //
        // Need to pass (1) = a / (1.0 - r), (2) r, and (3) count of x
        //
        map.xs = leslie->ax / (1.0f - leslie->rx); // --> dff_desc.s0
        map.ys = leslie->ay / (1.0f - leslie->ry); // --> dff_desc.s1
        map.xo = q * leslie->rx; // --> dff_desc.s4
        map.yo = q * leslie->ry; // --> dff_desc.s5
    } else {
        // Uniform grid: plain cell size and a centered origin.
        map.xs = leslie->rx;
        map.ys = leslie->ry;
        map.xo = (float)(leslie->nx - 1) * 0.5f * leslie->rx;
        map.yo = (float)(leslie->ny - 1) * 0.5f * leslie->ry;
    }
    map.xm = (float)(c - 1); // --> dff_desc.s8
    map.ym = (float)(c - 1); // --> dff_desc.s9
    // Temporary use a local pdf instead of leslie-flux
    RS_set_debris_flux_field_by_pdf(H, &map, pdf);
    // Be a good citizen, clean up
    free(pdf);
}
// Derive a debris flux PDF from the LES velocity field: each horizontal cell
// is weighted by its squared wind speed (area-weighted on stretched grids),
// written into leslie->flux (a mutable buffer reached through the const
// pointer), normalized, and uploaded via RS_set_debris_flux_field_by_pdf().
void RS_set_debris_flux_field_from_LES(RSHandle *H, const LESTable *leslie) {
    int k;
    int ix, iy;
    float dx, dy;
    // Some constants for the derived CDF
    const int pdf_count = leslie->nx * leslie->ny;
    const float mx = 0.5f * (float)(leslie->nx - 1);
    const float my = 0.5f * (float)(leslie->ny - 1);
    // Derive a PDF from velocity
    float v;
    float sum = 0.0;
    // Read offset starts at pdf_count, i.e. the second horizontal plane of
    // the u/v/w arrays — presumably to skip the ground level; TODO confirm.
    int i = pdf_count;
    for (k = 0; k < pdf_count; k++) {
        // Squared wind speed at this cell.
        v = leslie->data.u[i] * leslie->data.u[i] + leslie->data.v[i] * leslie->data.v[i] + leslie->data.w[i] * leslie->data.w[i];
        if (leslie->is_stretched) {
            iy = k / leslie->nx;
            ix = k % leslie->nx;
            // d(k) = a * r ^ k
            dx = leslie->ax * powf(leslie->rx, fabs((float)ix - mx));
            dy = leslie->ay * powf(leslie->ry, fabs((float)iy - my));
            // Weight by the local cell footprint.
            v *= dx * dy;
        }
        leslie->flux[k] = v;
        sum += v;
        i++;
    }
    // NOTE(review): if the wind field is identically zero, sum == 0 and this
    // normalization produces NaNs — confirm upstream guarantees non-zero wind.
    for (k = 0; k < pdf_count; k++) {
        leslie->flux[k] /= sum;
    }
    // y_ encodes the grid type: 0 - uniform, 1 - stretched.
    RSTable2D map = {
        .x_ = pdf_count,
        .y_ = leslie->is_stretched
    };
    if (leslie->is_stretched) {
        //
        // z(k) = a * (1.0 - r ^ k) / (1.0 - r)
        //      = (1.0 - pow(r, k)) * a / (1.0 - r)
        //      = pow(r, k) * -a / (1.0 - r) + a / (1.0 - r)
        //      = fma(pow(r, k), -a / (1.0 - r), a / (1.0 - r))
        //
        // Note that k is assumed to have value of 0.0 at the middle of the domain
        //
        // Need to pass (1) = a / (1.0 - r), (2) r, and (3) count of x
        //
        map.xs = leslie->ax / (1.0f - leslie->rx); // --> dff_desc.s0
        map.ys = leslie->ay / (1.0f - leslie->ry); // --> dff_desc.s1
        map.xo = leslie->rx; // --> dff_desc.s4
        map.yo = leslie->ry; // --> dff_desc.s5
    } else {
        // Uniform grid: plain cell size and a centered origin.
        map.xs = leslie->rx;
        map.ys = leslie->ry;
        map.xo = (float)(leslie->nx - 1) * 0.5f * leslie->rx;
        map.yo = (float)(leslie->ny - 1) * 0.5f * leslie->ry;
    }
    map.xm = (float)(leslie->nx - 1); // --> dff_desc.s8
    map.ym = (float)(leslie->ny - 1); // --> dff_desc.s9
    RS_set_debris_flux_field_by_pdf(H, &map, leslie->flux);
}
// Attach a scan pattern to the simulation handle. Only the pointer is stored
// (const-ness is cast away to fit the POSHandle slot), so ownership remains
// with the caller.
void RS_set_scan_pattern(RSHandle *H, const POSPattern *scan_pattern) {
    H->P = (POSHandle)scan_pattern;
    // Echo the pattern at high verbosity.
    if (H->verb > 2) {
        POS_summary(H->P);
    }
}
// Convenience wrapper: parse a textual scan description and install the
// resulting pattern. NOTE(review): the pattern returned by
// POS_init_with_string() is not freed here — presumably released with the
// handle; confirm against POS_free usage elsewhere.
void RS_set_scan_pattern_with_string(RSHandle *H, const char *scan_string) {
    RS_set_scan_pattern(H, POS_init_with_string(scan_string));
}
// Upload a pair of aerodynamic tables (drag coefficients cd & momentum
// coefficients cm) as 2-D RGBA float textures to every worker, appending at
// slot adm_count. Both tables must hold the same number of elements; cd's
// mapping coefficients populate the shared descriptor.
void RS_set_adm_data(RSHandle *H, const RSTable2D cd, const RSTable2D cm) {
    int i;
    const int t = H->workers[0].adm_count;
    const size_t n = cd.x_ * cd.y_;
    if (cm.x_ * cm.y_ != n) {
        rsprint("WARNING. RS_set_adm_data() received inconsistent cd (%d x %d) & cm (%d x %d) dimensions", cd.x_, cd.y_, cm.x_, cm.y_);
        return;
    }
    if (H->verb > 2) {
        rsprint("ADM[%d] @ X:[ -M_PI - +M_PI ]   Y:[ 0 - M_PI ]", H->workers[0].adm_count);
    }
    // This is the part that we need to create two texture maps for each RSTable2D table
    cl_image_format format = {CL_RGBA, CL_FLOAT};
#if defined (CL_VERSION_1_2)
    cl_image_desc desc;
    desc.image_type = CL_MEM_OBJECT_IMAGE2D;
    desc.image_width = cd.x_;
    desc.image_height = cd.y_;
    desc.image_depth = 1;
    desc.image_array_size = 0;
    desc.image_row_pitch = desc.image_width * sizeof(cl_float4);
    // NOTE(review): for a 2D image the CL spec expects image_slice_pitch 0;
    // most drivers ignore it here — confirm on strict implementations.
    desc.image_slice_pitch = desc.image_height * desc.image_row_pitch;
    desc.num_mip_levels = 0;
    desc.num_samples = 0;
    desc.buffer = NULL;
#endif
    for (i = 0; i < H->num_workers; i++) {
        // Release any tables already occupying slot t before re-creating.
        if (H->workers[i].adm_cd[t] != NULL && H->workers[i].adm_cm[t] != NULL) {
#if defined (_USE_GCL_)
            gcl_release_image(H->workers[i].adm_cd[t]);
            gcl_release_image(H->workers[i].adm_cm[t]);
#else
            clReleaseMemObject(H->workers[i].adm_cd[t]);
            clReleaseMemObject(H->workers[i].adm_cm[t]);
#endif
            H->workers[i].mem_usage -= ((cl_uint)(H->workers[i].adm_desc[t].s8 + 1.0f) * (H->workers[i].adm_desc[t].s9 + 1.0f)) * 2 * sizeof(cl_float4);
        }
        // adm_cd & adm_cm always have the same desc
#if defined (_USE_GCL_)
        H->workers[i].adm_cd[t] = gcl_create_image(&format, cd.x_, cd.y_, 1, H->workers[i].surf_adm_cd[t]);
        H->workers[i].adm_cm[t] = gcl_create_image(&format, cm.x_, cm.y_, 1, H->workers[i].surf_adm_cm[t]);
#else
        // BUGFIX: flags / retd / retm were previously declared only inside
        // the CL_VERSION_1_2 branch, so the clCreateImage2D fallback below
        // did not compile. Declare them for both non-GCL paths, mirroring
        // RS_set_rcs_data().
        cl_int retd, retm;
        cl_mem_flags flags = CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR;
#if defined (CL_VERSION_1_2)
        H->workers[i].adm_cd[t] = clCreateImage(H->workers[i].context, flags, &format, &desc, cd.data, &retd);
        H->workers[i].adm_cm[t] = clCreateImage(H->workers[i].context, flags, &format, &desc, cm.data, &retm);
#else
        H->workers[i].adm_cd[t] = clCreateImage2D(H->workers[i].context, flags, &format, cd.x_, cd.y_, cd.x_ * sizeof(cl_float4), cd.data, &retd);
        H->workers[i].adm_cm[t] = clCreateImage2D(H->workers[i].context, flags, &format, cm.x_, cm.y_, cm.x_ * sizeof(cl_float4), cm.data, &retm);
#endif
#endif
        if (H->workers[i].adm_cd[t] == NULL || H->workers[i].adm_cm[t] == NULL) {
            rsprint("ERROR: workers[%d] unable to create ADM tables on CL device(s).", i);
            return;
        } else if (H->verb > 2) {
            rsprint("workers[%d] created ADM tables adm_cd[%d] & adm_cd[%d] @ %p & %p", i, t, t, &H->workers[i].adm_cd[t], &H->workers[i].adm_cm[t]);
        }
#if defined (_USE_GCL_)
        dispatch_async(H->workers[i].que, ^{
            size_t origin[3] = {0, 0, 0};
            size_t region[3] = {cd.x_, cd.y_, 1};
            gcl_copy_ptr_to_image(H->workers[i].adm_cd[t], cd.data, origin, region);
            gcl_copy_ptr_to_image(H->workers[i].adm_cm[t], cm.data, origin, region);
            dispatch_semaphore_signal(H->workers[i].sem);
        });
#endif
    }
    for (i = 0; i < H->num_workers; i++) {
#if defined (_USE_GCL_)
        dispatch_semaphore_wait(H->workers[i].sem, DISPATCH_TIME_FOREVER);
#endif
        // Copy the mapping coefficients & physical constants to the worker.
        H->workers[i].adm_desc[t].s[RSTable3DDescriptionScaleX] = cd.xs;
        H->workers[i].adm_desc[t].s[RSTable3DDescriptionScaleY] = cd.ys;
        H->workers[i].adm_desc[t].s[RSTable3DDescriptionScaleZ] = 0.0f;
        H->workers[i].adm_desc[t].s[RSTable3DDescriptionOriginX] = cd.xo;
        H->workers[i].adm_desc[t].s[RSTable3DDescriptionOriginY] = cd.yo;
        H->workers[i].adm_desc[t].s[RSTable3DDescriptionOriginZ] = 0.0f;
        H->workers[i].adm_desc[t].s[RSTable3DDescriptionMaximumX] = cd.xm;
        H->workers[i].adm_desc[t].s[RSTable3DDescriptionMaximumY] = cd.ym;
        H->workers[i].adm_desc[t].s[RSTable3DDescriptionMaximumZ] = 0.0f;
        H->workers[i].adm_desc[t].s[RSTable3DDescriptionRecipInLnX] = H->adm_desc[t].phys.inv_inln_x;
        H->workers[i].adm_desc[t].s[RSTable3DDescriptionRecipInLnY] = H->adm_desc[t].phys.inv_inln_y;
        H->workers[i].adm_desc[t].s[RSTable3DDescriptionRecipInLnZ] = H->adm_desc[t].phys.inv_inln_z;
        H->workers[i].adm_desc[t].s[RSTable3DDescriptionTachikawa] = H->adm_desc[t].phys.Ta;
        H->workers[i].adm_count++;
        H->workers[i].mem_usage += ((cl_uint)(cd.xm + 1.0f) * (cd.ym + 1.0f)) * 2 * sizeof(cl_float4);
    }
}
// Pack an ADMTable (drag coefficients cdx/cdy/cdz and momentum coefficients
// cmx/cmy/cmz) into two float4 staging tables and upload them through
// RS_set_adm_data(). Beta spans [-pi, +pi] along x and alpha spans [0, pi]
// along y. NOTE(review): the +0.5f terms bias lookups by half a cell; the RCS
// tables use the same mapping without this bias — confirm both conventions
// are intended.
void RS_set_adm_data_to_ADM_table(RSHandle *H, const ADMTable *adam) {
    int i;
    RSTable2D cd = RS_table2d_init(adam->nn);
    RSTable2D cm = RS_table2d_init(adam->nn);
    // Allocation check of the local staging tables.
    if (cd.data == NULL || cm.data == NULL) {
        rsprint("ADM input data cannot be NULL.");
        return;
    }
    // Set up the mapping coefficients
    // Assumptions: maps are always in beta in [-180deg, +180deg] and alpha in [0, +180deg]
    cd.x_ = adam->nb;   cd.xm = (float)(cd.x_ - 1);   cd.xs = (float)(adam->nb - 1) / (2.0f * M_PI);   cd.xo = -(-M_PI) * cd.xs + 0.5f;
    cd.y_ = adam->na;   cd.ym = (float)(cd.y_ - 1);   cd.ys = (float)(adam->na - 1) / M_PI;            cd.yo = 0.5f;
    cm.x_ = adam->nb;   cm.xm = (float)(cm.x_ - 1);   cm.xs = (float)(adam->nb - 1) / (2.0f * M_PI);   cm.xo = -(-M_PI) * cm.xs + 0.5f;
    cm.y_ = adam->na;   cm.ym = (float)(cm.y_ - 1);   cm.ys = (float)(adam->na - 1) / M_PI;            cm.yo = 0.5f;
    // Arrange ADM values into float4, getting ready for GPU's global memory
    // (x / y / z carry the three axis coefficients; w is unused).
    for (i = 0; i < adam->nn; i++) {
        cd.data[i].x = adam->data.cdx[i];
        cd.data[i].y = adam->data.cdy[i];
        cd.data[i].z = adam->data.cdz[i];
        cd.data[i].w = 0.0f;
        cm.data[i].x = adam->data.cmx[i];
        cm.data[i].y = adam->data.cmy[i];
        cm.data[i].z = adam->data.cmz[i];
        cm.data[i].w = 0.0f;
    }
    // Cache a copy of the parameters but not the data, the data could be deallocated immediately after this function call.
    H->adm_desc[H->workers[0].adm_count] = *adam;
    memset(&H->adm_desc[H->workers[0].adm_count].data, 0, sizeof(ADMData));
    if (H->verb > 1) {
        const int t = H->workers[0].adm_count;
        rsprint("GPU ADM[%d] Ta = %.4f   inv_inln = [%.4f %.4f %.4f]   mass = %.4f kg",
                t, H->adm_desc[t].phys.Ta, H->adm_desc[t].phys.inv_inln_x, H->adm_desc[t].phys.inv_inln_y, H->adm_desc[t].phys.inv_inln_z, H->adm_desc[t].phys.mass);
    }
    RS_set_adm_data(H, cd, cm);
    RS_table2d_free(cd);
    RS_table2d_free(cm);
}
// Degenerate ADM: a 3 x 3 table of unit coefficients spanning the full
// (beta, alpha) = ([-pi, +pi], [0, pi]) attitude space. The same table is
// used for both the cd and cm textures.
void RS_set_adm_data_to_unity(RSHandle *H) {
    int k;
    RSTable2D table = RS_table2d_init(9);
    if (H->verb > 1) {
        rsprint("ADM to unity @ X:[ -M_PI - M_PI ]   Y:[ 0 - M_PI ]");
    }
    // Three cells across 2*pi in beta and pi in alpha.
    table.x_ = 3;
    table.xm = 2.0f;
    table.xs = 3.0f / (2.0f * M_PI);
    table.xo = -(-M_PI) * table.xs;
    table.y_ = 3;
    table.ym = 2.0f;
    table.ys = 3.0f / M_PI;
    table.yo = 0.0f;
    for (k = 0; k < 9; k++) {
        table.data[k].x = 1.0f;
        table.data[k].y = 1.0f;
        table.data[k].z = 1.0f;
        table.data[k].w = 0.0f;
    }
    RS_set_adm_data(H, table, table);
    RS_table2d_free(table);
}
// Deduct every resident ADM texture pair from each worker's memory tally and
// reset the per-worker table count. The GPU objects themselves are reclaimed
// when new tables arrive or the framework completes.
void RS_clear_adm_data(RSHandle *H) {
    for (int i = 0; i < H->num_workers; i++) {
        for (int t = 0; t < H->workers[i].adm_count; t++) {
            cl_uint nx = (cl_uint)H->workers[i].adm_desc[t].s[RSTable3DDescriptionMaximumX] + 1;
            cl_uint ny = (cl_uint)H->workers[i].adm_desc[t].s[RSTable3DDescriptionMaximumY] + 1;
            H->workers[i].mem_usage -= nx * ny * 2 * sizeof(cl_float4);
        }
        // BUGFIX: previously only workers[0].adm_count was reset inside this
        // loop, leaving the other workers' counts stale (compare the correct
        // per-worker reset in RS_clear_rcs_data()).
        H->workers[i].adm_count = 0;
    }
}
// Upload a pair of RCS tables (real & imaginary parts of the scattering
// amplitudes) as 2-D RGBA float textures to every worker, appending at slot
// rcs_count. Both tables must hold the same number of elements; real's
// mapping coefficients populate the shared descriptor.
void RS_set_rcs_data(RSHandle *H, const RSTable2D real, const RSTable2D imag) {
    int i;
    const int t = H->workers[0].rcs_count;
    const size_t n = real.x_ * real.y_;
    if (imag.x_ * imag.y_ != n) {
        // BUGFIX: a stray now() argument used to precede the dimensions,
        // mismatching the four %d conversions and shifting every printed
        // value (compare the twin check in RS_set_adm_data()).
        rsprint("WARNING. RS_set_rcs_data() received inconsistent real (%d x %d) & imag (%d x %d) dimensions", real.x_, real.y_, imag.x_, imag.y_);
        return;
    }
    if (H->verb > 1) {
        rsprint("GPU RCS[%d] @ X:[ -M_PI - +M_PI ]   Y:[ 0 - M_PI ]", H->workers[0].rcs_count);
    }
    // This is the part that we need to create two texture maps for each RSTable2D table
    cl_image_format format = {CL_RGBA, CL_FLOAT};
#if defined (CL_VERSION_1_2)
    cl_image_desc desc;
    desc.image_type = CL_MEM_OBJECT_IMAGE2D;
    desc.image_width = real.x_;
    desc.image_height = real.y_;
    desc.image_depth = 1;
    desc.image_array_size = 0;
    desc.image_row_pitch = desc.image_width * sizeof(cl_float4);
    // NOTE(review): for a 2D image the CL spec expects image_slice_pitch 0;
    // most drivers ignore it here — confirm on strict implementations.
    desc.image_slice_pitch = desc.image_height * desc.image_row_pitch;
    desc.num_mip_levels = 0;
    desc.num_samples = 0;
    desc.buffer = NULL;
#endif
    for (i = 0; i < H->num_workers; i++) {
        // Release any tables already occupying slot t before re-creating.
        if (H->workers[i].rcs_real[t] != NULL && H->workers[i].rcs_imag[t] != NULL) {
#if defined (_USE_GCL_)
            gcl_release_image(H->workers[i].rcs_real[t]);
            gcl_release_image(H->workers[i].rcs_imag[t]);
#else
            clReleaseMemObject(H->workers[i].rcs_real[t]);
            clReleaseMemObject(H->workers[i].rcs_imag[t]);
#endif
            H->workers[i].mem_usage -= ((cl_uint)(H->workers[i].rcs_desc[t].s8 + 1.0f) * (H->workers[i].rcs_desc[t].s9 + 1.0f)) * 2 * sizeof(cl_float4);
        }
        // rcs_real & rcs_imag always have the same desc
#if defined (_USE_GCL_)
        H->workers[i].rcs_real[t] = gcl_create_image(&format, real.x_, real.y_, 1, H->workers[i].surf_rcs_real[t]);
        H->workers[i].rcs_imag[t] = gcl_create_image(&format, imag.x_, imag.y_, 1, H->workers[i].surf_rcs_imag[t]);
#else
        cl_int retd, retm;
        cl_mem_flags flags = CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR;
#if defined (CL_VERSION_1_2)
        H->workers[i].rcs_real[t] = clCreateImage(H->workers[i].context, flags, &format, &desc, real.data, &retd);
        H->workers[i].rcs_imag[t] = clCreateImage(H->workers[i].context, flags, &format, &desc, imag.data, &retm);
#else
        H->workers[i].rcs_real[t] = clCreateImage2D(H->workers[i].context, flags, &format, real.x_, real.y_, real.x_ * sizeof(cl_float4), real.data, &retd);
        H->workers[i].rcs_imag[t] = clCreateImage2D(H->workers[i].context, flags, &format, imag.x_, imag.y_, imag.x_ * sizeof(cl_float4), imag.data, &retm);
#endif
#endif
        if (H->workers[i].rcs_real[t] == NULL || H->workers[i].rcs_imag[t] == NULL) {
            rsprint("ERROR: workers[%d] unable to create RCS tables on CL device(s).", i);
            return;
        } else if (H->verb > 2) {
            rsprint("workers[%d] created RCS tables rcs_real[%d] & rcs_imag[%d] @ %p & %p", i, t, t, &H->workers[i].rcs_real[t], &H->workers[i].rcs_imag[t]);
        }
#if defined (_USE_GCL_)
        dispatch_async(H->workers[i].que, ^{
            size_t origin[3] = {0, 0, 0};
            // BUGFIX: the region previously mixed real.x_ with imag.y_; the
            // guard above only checks total counts, so use one table's
            // dimensions consistently for both copies.
            size_t region[3] = {real.x_, real.y_, 1};
            gcl_copy_ptr_to_image(H->workers[i].rcs_real[t], real.data, origin, region);
            gcl_copy_ptr_to_image(H->workers[i].rcs_imag[t], imag.data, origin, region);
            dispatch_semaphore_signal(H->workers[i].sem);
        });
#endif
    }
    for (i = 0; i < H->num_workers; i++) {
#if defined (_USE_GCL_)
        dispatch_semaphore_wait(H->workers[i].sem, DISPATCH_TIME_FOREVER);
#endif
        // Copy the mapping coefficients over to the CL worker's descriptor.
        H->workers[i].rcs_desc[t].s[RSTable3DDescriptionScaleX] = real.xs;
        H->workers[i].rcs_desc[t].s[RSTable3DDescriptionScaleY] = real.ys;
        H->workers[i].rcs_desc[t].s[RSTable3DDescriptionScaleZ] = 0.0f;
        H->workers[i].rcs_desc[t].s[RSTable3DDescriptionOriginX] = real.xo;
        H->workers[i].rcs_desc[t].s[RSTable3DDescriptionOriginY] = real.yo;
        H->workers[i].rcs_desc[t].s[RSTable3DDescriptionOriginZ] = 0.0f;
        H->workers[i].rcs_desc[t].s[RSTable3DDescriptionMaximumX] = real.xm;
        H->workers[i].rcs_desc[t].s[RSTable3DDescriptionMaximumY] = real.ym;
        H->workers[i].rcs_desc[t].s[RSTable3DDescriptionMaximumZ] = 0.0f;
        H->workers[i].rcs_count++;
        H->workers[i].mem_usage += ((cl_uint)(real.xm + 1.0f) * (real.ym + 1.0f)) * 2 * sizeof(cl_float4);
    }
}
// Pack an RCSTable (HH, VV, HV complex scattering amplitudes) into two float4
// staging tables (real and imaginary parts) and upload them through
// RS_set_rcs_data(). Alpha spans [-pi, +pi] along x, beta spans [0, pi]
// along y.
void RS_set_rcs_data_to_RCS_table(RSHandle *H, const RCSTable *rosie) {
    int i;
    RSTable2D real = RS_table2d_init(rosie->nn);
    RSTable2D imag = RS_table2d_init(rosie->nn);
    // Allocation check of the local staging tables.
    if (real.data == NULL || imag.data == NULL) {
        rsprint("ERROR: RCS input data cannot be NULL.");
        return;
    }
    // Set up the mapping coefficients
    // Assumptions: maps are always in alpha in [-180deg, +180deg] and beta in [0, +180deg]
    real.x_ = rosie->na;   real.xm = (float)(real.x_ - 1);   real.xs = (float)(rosie->na - 1) / (2.0f * M_PI);   real.xo = -(-M_PI) * real.xs;
    real.y_ = rosie->nb;   real.ym = (float)(real.y_ - 1);   real.ys = (float)(rosie->nb - 1) / M_PI;            real.yo = 0.0f;
    // BUGFIX: imag.xo was previously scaled by real.xs (copy-paste); use
    // imag.xs. Numerically identical since the two scales match, but now
    // self-consistent.
    imag.x_ = rosie->na;   imag.xm = (float)(imag.x_ - 1);   imag.xs = (float)(rosie->na - 1) / (2.0f * M_PI);   imag.xo = -(-M_PI) * imag.xs;
    imag.y_ = rosie->nb;   imag.ym = (float)(imag.y_ - 1);   imag.ys = (float)(rosie->nb - 1) / M_PI;            imag.yo = 0.0f;
    // Arrange RCS values into float4, getting ready for GPU's global memory
    // (x = HH, y = VV, z = HV; w is unused).
    for (i = 0; i < rosie->nn; i++) {
        real.data[i].x = rosie->data.hh_real[i];
        real.data[i].y = rosie->data.vv_real[i];
        real.data[i].z = rosie->data.hv_real[i];
        real.data[i].w = 0.0f;
        imag.data[i].x = rosie->data.hh_imag[i];
        imag.data[i].y = rosie->data.vv_imag[i];
        imag.data[i].z = rosie->data.hv_imag[i];
        imag.data[i].w = 0.0f;
    }
    // Cache a copy of the parameters but not the data, the data could be deallocated immediately after this function call.
    H->rcs_desc[H->workers[0].rcs_count] = *rosie;
    memset(&H->rcs_desc[H->workers[0].rcs_count].data, 0, sizeof(RCSData));
    if (H->verb > 1) {
        const int t = H->workers[0].rcs_count;
        rsprint("GPU RCS[%d] lambda = %.2f m",
                t, H->rcs_desc[t].lambda);
    }
    RS_set_rcs_data(H, real, imag);
    RS_table2d_free(real);
    RS_table2d_free(imag);
}
// Degenerate RCS: a 3 x 3 table of purely-real unit amplitudes spanning the
// full (alpha, beta) = ([-pi, +pi], [0, pi]) attitude space. The imaginary
// part is all zeros.
void RS_set_rcs_data_to_unity(RSHandle *H) {
    int k;
    RSTable2D table_real = RS_table2d_init(9);
    RSTable2D table_imag = RS_table2d_init(9);
    if (H->verb > 1) {
        rsprint("RCS to unity @ X:[ -M_PI - M_PI ]   Y:[ 0 - M_PI ]");
    }
    // Three cells across 2*pi in alpha and pi in beta, identical mapping for
    // both tables.
    table_real.x_ = 3;
    table_real.xm = 2.0f;
    table_real.xs = 3.0f / (2.0f * M_PI);
    table_real.xo = -(-M_PI) * table_real.xs;
    table_real.y_ = 3;
    table_real.ym = 2.0f;
    table_real.ys = 3.0f / M_PI;
    table_real.yo = 0.0f;
    table_imag.x_ = 3;
    table_imag.xm = 2.0f;
    table_imag.xs = 3.0f / (2.0f * M_PI);
    table_imag.xo = -(-M_PI) * table_imag.xs;
    table_imag.y_ = 3;
    table_imag.ym = 2.0f;
    table_imag.ys = 3.0f / M_PI;
    table_imag.yo = 0.0f;
    for (k = 0; k < 9; k++) {
        table_real.data[k].x = 1.0f;
        table_real.data[k].y = 1.0f;
        table_real.data[k].z = 1.0f;
        table_real.data[k].w = 0.0f;
        table_imag.data[k].x = 0.0f;
        table_imag.data[k].y = 0.0f;
        table_imag.data[k].z = 0.0f;
        table_imag.data[k].w = 0.0f;
    }
    RS_set_rcs_data(H, table_real, table_imag);
    RS_table2d_free(table_real);
    RS_table2d_free(table_imag);
}
// Deduct every resident RCS texture pair from each worker's memory tally and
// reset its table count. The GPU objects are reclaimed elsewhere.
void RS_clear_rcs_data(RSHandle *H) {
    for (int w = 0; w < H->num_workers; w++) {
        for (int k = 0; k < H->workers[w].rcs_count; k++) {
            const cl_uint dim_x = (cl_uint)H->workers[w].rcs_desc[k].s[RSTable3DDescriptionMaximumX] + 1;
            const cl_uint dim_y = (cl_uint)H->workers[w].rcs_desc[k].s[RSTable3DDescriptionMaximumY] + 1;
            H->workers[w].mem_usage -= dim_x * dim_y * 2 * sizeof(cl_float4);
        }
        H->workers[w].rcs_count = 0;
    }
}
// This method can be confusing — avoid using it directly.
void RS_set_obj_data_to_config(RSHandle *H, OBJConfig type) {
    // Pull the combined object table and push its ADM / RCS halves separately.
    OBJTable *table = OBJ_get_table(H->O, type);
    RS_set_adm_data_to_ADM_table(H, table->adm_table);
    RS_set_rcs_data_to_RCS_table(H, table->rcs_table);
}
// Set the seed used for the pseudo-random number generator (consumed by
// srand() during RS_populate()).
void RS_set_random_seed(RSHandle *H, const unsigned int seed) {
    H->random_seed = seed;
    if (H->verb) {
        rsprint("Random number generator set to use seed %s", commaint(seed));
    }
}
// Add debris to the simulation machine
// Finds the first free debris slot (index starts at 1; slot 0 holds the
// meteorological scatterers), loads the ADM & RCS tables of the requested
// object type, and registers `count` particles of that type.
void RS_add_debris(RSHandle *H, OBJConfig type, const size_t count) {
    int k = 1;
    // Lazily create the object table collection on first use
    if (H->O == NULL) {
        H->O = OBJ_init();
        if (H->O == NULL) {
            rsprint("ERROR: OBJ_init() failed.");
            exit(EXIT_FAILURE);
        }
    }
    // Find the first unused slot. The bounds check must come first so that
    // H->counts[RS_MAX_DEBRIS_TYPES] is never read when every slot is taken
    // (the original order evaluated counts[k] before checking k).
    while (k < RS_MAX_DEBRIS_TYPES && H->counts[k] > 0) {
        k++;
    }
    if (k == RS_MAX_DEBRIS_TYPES || H->workers[0].adm_count == RS_MAX_ADM_TABLES || H->workers[0].rcs_count == RS_MAX_RCS_TABLES) {
        rsprint("Unable to add more debris type.");
        return;
    }
    OBJTable *obj_table = OBJ_get_table(H->O, type);
    RS_set_adm_data_to_ADM_table(H, obj_table->adm_table);
    RS_set_rcs_data_to_RCS_table(H, obj_table->rcs_table);
    H->counts[k] = count;
    H->num_types++;
    // The ADM / RCS table slots are expected to advance in lockstep with k
    if (k != H->workers[0].adm_count || k != H->workers[0].rcs_count) {
        rsprint("WARNING: Inconsistent k = %d vs H->workers[0].adm_count = %d vs H->workers[0].rcs_count = %d.", k, H->workers[0].adm_count, H->workers[0].rcs_count);
        return;
    }
    if (H->verb) {
        rsprint("Total number of body types = %d (including meteorological scatterers)", (int)H->num_types);
    }
}
#pragma mark -
#pragma mark GUI Specific Functions
#if defined (GUI) || defined (_USE_GCL_)
// Compute auxiliary attributes: range, angular weight, etc.
// Users should not need to call this directly. It's either RS_make_pulse() or RS_update_colors()
// The framework will check the status to avoid redundant computations.
void RS_update_auxiliary_attributes(RSHandle *H) {
    int i;
    if (!(H->status & RSStatusDomainPopulated)) {
        rsprint("ERROR: Simulation domain not populated.");
        return;
    }
#if defined (_USE_GCL_)
    for (i = 0; i < H->num_workers; i++) {
        dispatch_async(H->workers[i].que, ^{
            scat_sig_aux_kernel(&H->workers[i].ndrange_scat_all,
                                (cl_float4 *)H->workers[i].scat_sig,
                                (cl_float4 *)H->workers[i].scat_aux,
                                (cl_float4 *)H->workers[i].scat_pos,
                                (cl_float4 *)H->workers[i].scat_rcs,
                                (cl_float *)H->workers[i].angular_weight,
                                H->workers[i].angular_weight_desc,
                                H->sim_desc);
            dispatch_semaphore_signal(H->workers[i].sem);
        });
    }
    for (i = 0; i < H->num_workers; i++) {
        dispatch_semaphore_wait(H->workers[i].sem, DISPATCH_TIME_FOREVER);
    }
    // NOTE(review): this path does not clear RSStatusScattererSignalNeedsUpdate
    // like the OpenCL path below does — confirm whether that is intended.
#else
    cl_event events[RS_MAX_GPU_DEVICE];
    memset(events, 0, sizeof(events));
    for (i = 0; i < H->num_workers; i++) {
        RSWorker *C = &H->workers[i];
        clSetKernelArg(C->kern_scat_sig_aux, RSScattererAngularWeightKernalArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
        clEnqueueNDRangeKernel(C->que, C->kern_scat_sig_aux, 1, NULL, &C->num_scats, NULL, 0, NULL, &events[i]);
    }
    for (i = 0; i < H->num_workers; i++) {
        clFlush(H->workers[i].que);
    }
    for (i = 0; i < H->num_workers; i++) {
        // Wait on this worker's own completion event. (Previously this waited
        // on events[0] for every worker, so later workers' kernels were
        // released without being synchronized.)
        clWaitForEvents(1, &events[i]);
        clReleaseEvent(events[i]);
    }
    H->status &= ~RSStatusScattererSignalNeedsUpdate;
#endif
}
// Refresh the per-scatterer color buffer for rendering. If the debris RCS
// or the scatterer signal is flagged stale, the corresponding kernels are
// re-run first; otherwise only the color kernel is launched. Clears both
// "needs update" status flags on completion.
void RS_update_colors(RSHandle *H) {
    int i, k;
    int r, a;
    if (!(H->status & RSStatusDomainPopulated)) {
        rsprint("ERROR: Simulation domain not populated.");
        return;
    }
#if defined (_USE_GCL_)
    if (H->status & RSStatusScattererSignalNeedsUpdate) {
        for (i = 0; i < H->num_workers; i++) {
            r = 0;
            a = 0;
            RSWorker *C = &H->workers[i];
            // Update debris RCS for each populated type (type 0 is the
            // meteorological scatterers, which have no debris table).
            for (k = 1; k < H->num_types; k++) {
                if (C->counts[k]) {
                    dispatch_async(C->que, ^{
                        db_rcs_kernel(&C->ndrange_scat[k],
                                      (cl_float4 *)C->scat_pos,
                                      (cl_float4 *)C->scat_ori,
                                      (cl_float4 *)C->scat_rcs,
                                      (cl_image)H->workers[i].rcs_real[r],
                                      (cl_image)H->workers[i].rcs_imag[r],
                                      H->workers[i].rcs_desc[r],
                                      H->sim_desc);
                        dispatch_semaphore_signal(C->sem);
                    });
                }
                // Cycle through this worker's tables. (Fixed: previously
                // indexed H->workers[k] — the type index — instead of the
                // worker index i, unlike the OpenCL path below.)
                r = r == H->workers[i].rcs_count - 1 ? 0 : r + 1;
                a = a == H->workers[i].adm_count - 1 ? 0 : a + 1;
            }
            for (k = 1; k < H->num_types; k++) {
                if (C->counts[k]) {
                    dispatch_semaphore_wait(C->sem, DISPATCH_TIME_FOREVER);
                }
            }
            dispatch_async(C->que, ^{
                scat_sig_aux_kernel(&C->ndrange_scat_all,
                                    (cl_float4 *)C->scat_sig,
                                    (cl_float4 *)C->scat_aux,
                                    (cl_float4 *)C->scat_pos,
                                    (cl_float4 *)C->scat_rcs,
                                    (cl_float *)C->angular_weight,
                                    C->angular_weight_desc,
                                    H->sim_desc);
                scat_clr_kernel(&C->ndrange_scat_all,
                                (cl_float4 *)C->scat_clr,
                                (cl_float4 *)C->scat_pos,
                                (cl_float4 *)C->scat_aux,
                                (cl_float4 *)C->scat_rcs,
                                H->draw_mode);
                dispatch_semaphore_signal(C->sem);
            });
            dispatch_semaphore_wait(C->sem, DISPATCH_TIME_FOREVER);
        }
    } else {
        for (i = 0; i < H->num_workers; i++) {
            RSWorker *C = &H->workers[i];
            dispatch_async(H->workers[i].que, ^{
                // Set individual color based on draw mode
                scat_clr_kernel(&C->ndrange_scat_all,
                                (cl_float4 *)C->scat_clr,
                                (cl_float4 *)C->scat_pos,
                                (cl_float4 *)C->scat_aux,
                                (cl_float4 *)C->scat_rcs,
                                H->draw_mode);
                dispatch_semaphore_signal(C->sem);
            });
            dispatch_semaphore_wait(C->sem, DISPATCH_TIME_FOREVER);
        }
    }
#else
    // Second dimension must be at least 2 because events[i][1] is used for the
    // color kernel even when num_types == 1 (previously an out-of-bounds write).
    cl_event events[RS_MAX_GPU_DEVICE][MAX(2, H->num_types)];
    memset(events, 0, sizeof(events));
    if (H->status & RSStatusDebrisRCSNeedsUpdate) {
        // Very similar to the RS_advance_time() function but only the debris RCS is updated
        for (i = 0; i < H->num_workers; i++) {
            r = 0;
            a = 0;
            RSWorker *C = &H->workers[i];
            for (k = 1; k < H->num_types; k++) {
                if (C->counts[k]) {
                    clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentRadarCrossSectionReal, sizeof(cl_mem), &C->rcs_real[r]);
                    clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentRadarCrossSectionImag, sizeof(cl_mem), &C->rcs_imag[r]);
                    clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentRadarCrossSectionDescription, sizeof(cl_float16), &C->rcs_desc[r]);
                    clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
                    clEnqueueNDRangeKernel(C->que, C->kern_db_rcs, 1, &C->origins[k], &C->counts[k], NULL, 0, NULL, &events[i][k]);
                }
                r = r == H->workers[i].rcs_count - 1 ? 0 : r + 1;
                a = a == H->workers[i].adm_count - 1 ? 0 : a + 1;
            }
        }
        for (i = 0; i < H->num_workers; i++) {
            clFlush(H->workers[i].que);
        }
        for (i = 0; i < H->num_workers; i++) {
            for (k = 1; k < H->num_types; k++) {
                if (H->workers[i].counts[k]) {
                    clWaitForEvents(1, &events[i][k]);
                    clReleaseEvent(events[i][k]);
                }
            }
        }
        // RCS changed, so the derived signal must be recomputed too
        H->status |= RSStatusScattererSignalNeedsUpdate;
    }
    for (i = 0; i < H->num_workers; i++) {
        if (H->status & RSStatusScattererSignalNeedsUpdate) {
            RSWorker *C = &H->workers[i];
            clSetKernelArg(C->kern_scat_sig_aux, RSScattererAngularWeightKernalArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
            clEnqueueNDRangeKernel(C->que, C->kern_scat_sig_aux, 1, NULL, &C->num_scats, NULL, 0, NULL, &events[i][0]);
            clSetKernelArg(C->kern_scat_clr, RSScattererColorKernelArgumentDrawMode, sizeof(cl_uint4), &H->draw_mode);
            // Color kernel waits on the signal/aux kernel via its event
            clEnqueueNDRangeKernel(C->que, C->kern_scat_clr, 1, NULL, &C->num_scats, NULL, 1, &events[i][0], &events[i][1]);
        } else {
            RSWorker *C = &H->workers[i];
            clSetKernelArg(C->kern_scat_clr, RSScattererColorKernelArgumentDrawMode, sizeof(cl_uint4), &H->draw_mode);
            clEnqueueNDRangeKernel(C->que, C->kern_scat_clr, 1, NULL, &C->num_scats, NULL, 0, NULL, &events[i][1]);
        }
    }
    for (i = 0; i < H->num_workers; i++) {
        clFlush(H->workers[i].que);
    }
    for (i = 0; i < H->num_workers; i++) {
        clWaitForEvents(1, &events[i][1]);
        clReleaseEvent(events[i][1]);
        if (H->status & RSStatusScattererSignalNeedsUpdate) {
            clReleaseEvent(events[i][0]);
        }
    }
#endif
    H->status &= ~RSStatusDebrisRCSNeedsUpdate;
    H->status &= ~RSStatusScattererSignalNeedsUpdate;
}
// Record the OpenGL VBO identifiers supplied by the GUI — one triplet
// (position, color, orientation) per worker — so the simulation buffers can
// be shared with GL. n is the number of VBOs per worker row.
void RS_share_mem_with_vbo(RSHandle *H, const int n, unsigned int vbo[][n]) {
    int i;
    if (H->verb) {
        if (H->num_workers == 1) {
            printf("%s : RS : RS_share_mem_with_vbo() [ %d %d %d ]\n", now(),
                   vbo[0][0], vbo[0][1], vbo[0][2]);
        } else {
            printf("%s : RS : RS_share_mem_with_vbo() [ %d %d %d ] [ %d %d %d ]\n", now(),
                   vbo[0][0], vbo[0][1], vbo[0][2],
                   vbo[1][0], vbo[1][1], vbo[1][2]);
        }
    }
    for (i = 0; i < H->num_workers; i++) {
        H->workers[i].vbo_scat_pos = vbo[i][0];
        H->workers[i].vbo_scat_clr = vbo[i][1];
        H->workers[i].vbo_scat_ori = vbo[i][2];
    }
    H->has_vbo_from_gl = 1;
}
#endif
#if defined (_USE_GCL_)
// Populate the ndrange structures consumed by the GCL kernel dispatches:
// one range covering all scatterers, one per populated scatterer type, and
// the two make-pulse passes.
void RS_derive_ndranges(RSHandle *H) {
    int w, t;
    for (w = 0; w < H->num_workers; w++) {
        RSWorker *C = &H->workers[w];
        // Full-population range
        C->ndrange_scat_all.work_dim = 1;
        C->ndrange_scat_all.global_work_offset[0] = 0;
        C->ndrange_scat_all.global_work_size[0] = C->num_scats;
        C->ndrange_scat_all.local_work_size[0] = 0;
        // Per-type sub-ranges, skipping empty types
        for (t = 0; t < H->num_types; t++) {
            if (H->counts[t] == 0) {
                continue;
            }
            C->ndrange_scat[t].work_dim = 1;
            C->ndrange_scat[t].global_work_offset[0] = C->origins[t];
            C->ndrange_scat[t].global_work_size[0] = C->counts[t];
            C->ndrange_scat[t].local_work_size[0] = 0;
            if (C->verb > 2) {
                rsprint("work[%d] offset, size = %d, %d",
                        (int)C->name, (int)C->ndrange_scat[t].global_work_offset[0], (int)C->ndrange_scat[t].global_work_size[0]);
            }
        }
        // Two-pass pulse reduction ranges from make_pulse_params
        C->ndrange_pulse_pass_1.work_dim = 1;
        C->ndrange_pulse_pass_1.global_work_offset[0] = 0;
        C->ndrange_pulse_pass_1.global_work_size[0] = C->make_pulse_params.global[0];
        C->ndrange_pulse_pass_1.local_work_size[0] = C->make_pulse_params.local[0];
        C->ndrange_pulse_pass_2.work_dim = 1;
        C->ndrange_pulse_pass_2.global_work_offset[0] = 0;
        C->ndrange_pulse_pass_2.global_work_size[0] = C->make_pulse_params.global[1];
        C->ndrange_pulse_pass_2.local_work_size[0] = C->make_pulse_params.local[1];
    }
}
#endif
#pragma mark -
#pragma mark Framework Functions
// Launch the pass-through io_kernel on every worker and wait for completion.
void RS_io_test(RSHandle *H) {
    int w;
#if defined (_USE_GCL_)
    for (w = 0; w < H->num_workers; w++) {
        dispatch_async(H->workers[w].que, ^{
            io_kernel(&H->workers[w].ndrange_scat[0],
                      (cl_float4 *)H->workers[w].scat_pos,
                      (cl_float4 *)H->workers[w].scat_aux);
            dispatch_semaphore_signal(H->workers[w].sem);
        });
    }
    for (w = 0; w < H->num_workers; w++) {
        dispatch_semaphore_wait(H->workers[w].sem, DISPATCH_TIME_FOREVER);
    }
#else
    // Queue on every device first, then flush, then block until all finish.
    for (w = 0; w < H->num_workers; w++) {
        RSWorker *C = &H->workers[w];
        clEnqueueNDRangeKernel(C->que, C->kern_io, 1, NULL, &C->num_scats, NULL, 0, NULL, NULL);
    }
    for (w = 0; w < H->num_workers; w++) {
        clFlush(H->workers[w].que);
    }
    for (w = 0; w < H->num_workers; w++) {
        clFinish(H->workers[w].que);
    }
#endif
}
// Allocate the host-side scatterer attribute arrays, seed the initial
// particle state (position, velocity, orientation, tumble, RCS, random
// seeds), fall back to default velocity / scan / ADM / RCS configurations
// when none were provided, allocate the per-worker GPU buffers, upload
// everything, and prime the kernels with one zero-PRT time step.
// Exits the process on fatal configuration/size errors; returns early on
// host allocation failure. Call once, after all RS_set_*() calls.
void RS_populate(RSHandle *H) {
    int i, k, n, w;
    if (H->verb > 1) {
        rsprint("RS_populate() preferred_multiple = %s\n", commaint(H->preferred_multiple));
    }
    if (H->num_scats > RS_MAX_NUM_SCATS) {
        rsprint("Number of scatterers exceed the maximum allowed. (%s > %s).\n", commaint(H->num_scats), commaint(RS_MAX_NUM_SCATS));
        exit(EXIT_FAILURE);
    }
    // Every scatterer attribute is one contiguous cl_float4 buffer per
    // worker; it must fit within the device's maximum single allocation.
    size_t max_var_size;
    CL_CHECK(clGetDeviceInfo(H->workers[0].dev, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(max_var_size), &max_var_size, NULL));
    if (H->workers[0].num_scats * sizeof(cl_float4) > max_var_size) {
        rsprint("ERROR: Every scatterer attribute occupies %s B > %s B.", commaint(H->workers[0].num_scats * sizeof(cl_float4)), commaint(max_var_size));
        exit(EXIT_FAILURE);
    }
    // Set an LES field if there isn't one set before
    if (H->L == NULL) {
        RS_set_vel_data_to_config(H, LESConfigSuctionVortices);
    }
    // Set a scanning strategy if none has been provided
    if (H->P == NULL) {
        rsprint("RS_populate() Scan pattern does not exist. Assume a PPI.\n");
        RS_set_scan_pattern(H, POS_init());
    }
    // Set a box if it has not been set
    if (H->num_anchors == 0) {
        if (H->verb) {
            rsprint("No scan box defined. Using scan strategy to derive the scan box.\n");
        }
        RSBox box = RS_suggest_scan_domain(H);
        //rsprint("Suggested box size = %.2f x %.2f x %.2f\n", box.size.r, box.size.a, box.size.e);
        RS_set_scan_box(H, box);
    }
    // These should be identical
    if (H->workers[0].adm_count != H->workers[0].rcs_count) {
        rsprint("ADM & RCS are not consistent. Unexpected behavior may happen.\n");
    }
    // Use some default tables if there aren't any set
    if (H->workers[0].adm_count == 0) {
        RS_set_adm_data_to_unity(H);
    }
    if (H->workers[0].rcs_count == 0) {
        RS_set_rcs_data_to_unity(H);
    }
    // Double population is a fatal programming error
    if (H->status & RSStatusDomainPopulated) {
        rsprint("WARNING. Simulation was populated.");
        exit(EXIT_FAILURE);
    }
    //
    // CPU memory allocation
    //
    if (H->scat_pos != NULL) {
        RS_free_scat_memory(H);
    }
    posix_memalign((void **)&H->scat_uid, RS_ALIGN_SIZE, H->num_scats * sizeof(cl_uint4));
    posix_memalign((void **)&H->scat_pos, RS_ALIGN_SIZE, H->num_scats * sizeof(cl_float4));
    posix_memalign((void **)&H->scat_vel, RS_ALIGN_SIZE, H->num_scats * sizeof(cl_float4));
    posix_memalign((void **)&H->scat_ori, RS_ALIGN_SIZE, H->num_scats * sizeof(cl_float4));
    posix_memalign((void **)&H->scat_tum, RS_ALIGN_SIZE, H->num_scats * sizeof(cl_float4));
    posix_memalign((void **)&H->scat_aux, RS_ALIGN_SIZE, H->num_scats * sizeof(cl_float4));
    posix_memalign((void **)&H->scat_rcs, RS_ALIGN_SIZE, H->num_scats * sizeof(cl_float4));
    posix_memalign((void **)&H->scat_sig, RS_ALIGN_SIZE, H->num_scats * sizeof(cl_float4));
    posix_memalign((void **)&H->scat_rnd, RS_ALIGN_SIZE, H->num_scats * sizeof(cl_uint4));
    posix_memalign((void **)&H->pulse, RS_ALIGN_SIZE, H->params.range_count * sizeof(cl_float4));
    if (H->scat_uid == NULL ||
        H->scat_pos == NULL ||
        H->scat_vel == NULL ||
        H->scat_ori == NULL ||
        H->scat_tum == NULL ||
        H->scat_aux == NULL ||
        H->scat_rcs == NULL ||
        H->scat_sig == NULL ||
        H->scat_rnd == NULL ||
        H->pulse == NULL) {
        rsprint("ERROR: Unable to allocate memory space for scatterers.");
        return;
    }
    memset(H->scat_aux, 0, H->num_scats * sizeof(cl_float4));
    memset(H->scat_sig, 0, H->num_scats * sizeof(cl_float4));
    // NOTE(review): accounting counts 8 float4 attributes but only 7 float4
    // arrays are allocated above (pos, vel, ori, tum, aux, rcs, sig) —
    // possibly the 8th accounts for a buffer allocated elsewhere; confirm.
    H->mem_size = H->num_scats * (8 * sizeof(cl_float4) + 2 * sizeof(cl_uint4)) + H->params.range_count * sizeof(cl_float4);
    char has_null = 0;
    for (i = 0; i < H->num_workers; i++) {
        posix_memalign((void **)&H->pulse_tmp[i], RS_ALIGN_SIZE, H->params.range_count * sizeof(cl_float4));
        has_null |= H->pulse_tmp[i] == NULL;
        H->mem_size += H->params.range_count * sizeof(cl_float4);
    }
    if (has_null) {
        rsprint("ERROR: Unable to allocate memory space for pulses.");
        return;
    }
    // Get the available memory of the host
#if defined(_SC_PHYS_PAGES)
    long mem_pages = sysconf(_SC_PHYS_PAGES);
    long mem_page_size = sysconf(_SC_PAGE_SIZE);
    size_t host_mem = mem_pages * mem_page_size;
#else
    // Fallback path (e.g. macOS): query total memory through sysctl
    uint64_t mem;
    size_t len = sizeof(mem);
    sysctlbyname("hw.memsize", &mem, &len, NULL, 0);
    size_t host_mem = mem;
#endif
    // Warn when usage exceeds 75% of physical memory
    if (H->mem_size > host_mem / 4 * 3) {
        rsprint("WARNING: High host memory usage: %s GB out of %s GB.", commafloat((float)H->mem_size * 1.0e-9f), commafloat((float)host_mem * 1.0e-9f));
    } else if (H->verb) {
        if (H->mem_size > (size_t)1.0e9f) {
            rsprint("CPU memory usage = %s GB out of %s GB", commafloat((float)H->mem_size * 1.0e-9f), commafloat((float)host_mem * 1.0e-9f));
        } else {
            rsprint("CPU memory usage = %s MB out of %s MB", commafloat((float)H->mem_size * 1.0e-6f), commafloat((float)host_mem * 1.0e-6f));
        }
    }
    // Update scatterer origin and offset of each worker
    RS_update_origins_offsets(H);
    // Initialize the scatter body positions on CPU, will upload to the GPU later
    srand(H->random_seed);
    RSVolume domain = RS_get_domain(H);
    uint32_t uid = 0;
    if (H->sim_concept & RSSimulationConceptFixedScattererPosition) {
        // Fixed-position mode: scatterers are placed on rays through the
        // anchor points and replicated at every range gate.
        if (H->num_types > 1) {
            rsprint("WARNING. Debris particles are not emulated in RSSimulationConceptFixedScattererPosition mode.\n");
        }
        rsprint("RS_populate() num_workers = %d\n", H->num_workers);
        // Range of the first anchor, used to normalize anchor directions
        const RSfloat r_lo = sqrtf(H->anchor_pos[0].x * H->anchor_pos[0].x + H->anchor_pos[0].y * H->anchor_pos[0].y + H->anchor_pos[0].z * H->anchor_pos[0].z);
        // Anchor points alternate between the low and high. First plan can be retrieved from the lower portion
        i = 0;
        for (k = 0; k < H->num_anchors - 1; k++) {
            if (k % 2 == 0) {
                // Project the even (lower) anchors onto the first range gate
                H->scat_pos[i].x = H->anchor_pos[k].x / r_lo * H->params.range_start;
                H->scat_pos[i].y = H->anchor_pos[k].y / r_lo * H->params.range_start;
                H->scat_pos[i].z = H->anchor_pos[k].z / r_lo * H->params.range_start;
                H->scat_pos[i].w = 0.0f;
                i++;
            }
        }
        if (i != (H->num_anchors - 1) / 2) {
            rsprint("WARNING. Inconsistency detected. H->num_anchors = %lu != %d\n", H->num_anchors, i);
        }
        const int anchors_per_layer = i;
        // Use the first plane and duplicate to the rest of the volume
        i = 0;
        for (n = 0; n < H->params.range_count; n++) {
            for (k = 0; k < anchors_per_layer; k++) {
                H->scat_uid[i].s0 = uid++;
                H->scat_uid[i].s1 = (cl_uint)H->num_scats;
                H->scat_uid[i].s2 = k;
                H->scat_uid[i].s3 = 0;
                // Scale the first-gate position out to range gate n
                H->scat_pos[i].x = H->scat_pos[k].x / H->params.range_start * (H->params.range_start + (float)n * H->params.range_delta);
                H->scat_pos[i].y = H->scat_pos[k].y / H->params.range_start * (H->params.range_start + (float)n * H->params.range_delta);
                H->scat_pos[i].z = H->scat_pos[k].z / H->params.range_start * (H->params.range_start + (float)n * H->params.range_delta);
                H->scat_pos[i].w = 0.0f;
                H->scat_aux[i].s0 = 0.0f;                                      // range
                H->scat_aux[i].s1 = (float)rand() / RAND_MAX;                  // age
                H->scat_aux[i].s2 = 0.0f;                                      // dsd bin index
                H->scat_aux[i].s3 = 1.0f;                                      // angular weight [0.0, 1.0]
                H->scat_vel[i].x = 0.0f;                                       // u component of velocity
                H->scat_vel[i].y = 0.0f;                                       // v component of velocity
                H->scat_vel[i].z = 0.0f;                                       // w component of velocity
                H->scat_vel[i].w = 0.0f;                                       // n/a
                // At the reference
                H->scat_ori[i].x = 0.0f;                                       // x of quaternion
                H->scat_ori[i].y = 0.0f;                                       // y of quaternion
                H->scat_ori[i].z = 0.0f;                                       // z of quaternion
                H->scat_ori[i].w = 1.0f;                                       // w of quaternion
                // Tumbling vector for orientation update
                H->scat_tum[i].x = 0.0f;                                       // x of quaternion
                H->scat_tum[i].y = 0.0f;                                       // y of quaternion
                H->scat_tum[i].z = 0.0f;                                       // z of quaternion
                H->scat_tum[i].w = 1.0f;                                       // w of quaternion
                // Initial return from each point
                H->scat_rcs[i].s0 = 1.0e-10f;                                  // sh_real of rcs
                H->scat_rcs[i].s1 = 0.0f;                                      // sh_imag of rcs
                H->scat_rcs[i].s2 = 1.0e-10f;                                  // rcs.s2 = cn2
                H->scat_rcs[i].s3 = (float)rand() / RAND_MAX * 2.0 * M_PI;     // rcs.s3 = phi (accumulated phase)
                // Random seeds
                H->scat_rnd[i].s0 = rand();                                    // random seed
                H->scat_rnd[i].s1 = rand();                                    // random seed
                H->scat_rnd[i].s2 = rand();                                    // random seed
                H->scat_rnd[i].s3 = rand();                                    // random seed
                i++;
            }
        }
    } else {
        //
        // Initialize the scatter body positions & velocities
        //
        for (k = 0; k < H->num_types; k++) {
            for (w = 0; w < H->num_workers; w++) {
                // Host-array index of the first scatterer of type k on worker w
                i = (int)(H->offset[w] + H->workers[w].origins[k]);
#ifdef DEBUG_HEAVY
                rsprint(RS_INDENT "type[%d] workers[%d] n = %d", k, w, H->workers[w].counts[k]);
#endif
                for (n = 0; n < H->workers[w].counts[k]; n++) {
                    H->scat_uid[i].s0 = uid++;
                    H->scat_uid[i].s1 = n;
                    H->scat_uid[i].s2 = k;
                    H->scat_uid[i].s3 = w;
                    // Uniformly distribute particles in the domain; z is
                    // restricted to the lowest 10 m above the domain origin
                    H->scat_pos[i].x = (float)rand() / RAND_MAX * domain.size.x + domain.origin.x;
                    H->scat_pos[i].y = (float)rand() / RAND_MAX * domain.size.y + domain.origin.y;
                    //H->scat_pos[i].z = (float)rand() / RAND_MAX * domain.size.z + domain.origin.z;
                    H->scat_pos[i].z = (float)rand() / RAND_MAX * 10.0f + domain.origin.z;
                    //H->scat_pos[i].z = 20.0f;
                    H->scat_pos[i].w = 0.0f;                                   // Use this to store drop radius in m
                    H->scat_aux[i].s0 = 0.0f;                                  // range
                    H->scat_aux[i].s1 = (float)rand() / RAND_MAX;              // age
                    H->scat_aux[i].s2 = 0.0f;                                  // dsd bin index
                    H->scat_aux[i].s3 = 1.0f;                                  // angular weight [0.0, 1.0]
                    H->scat_vel[i].x = 0.0f;                                   // u component of velocity
                    H->scat_vel[i].y = 0.0f;                                   // v component of velocity
                    H->scat_vel[i].z = 0.0f;                                   // w component of velocity
                    H->scat_vel[i].w = 0.0f;                                   // n/a
                    // At the reference
                    H->scat_ori[i].x = 0.0f;                                   // x of quaternion
                    H->scat_ori[i].y = 0.0f;                                   // y of quaternion
                    H->scat_ori[i].z = 0.0f;                                   // z of quaternion
                    H->scat_ori[i].w = 1.0f;                                   // w of quaternion
#if defined(QUAT_INIT_FACE_SKY)
                    // Facing the sky
                    H->scat_ori[i].x = 0.0f;                                   // x of quaternion
                    H->scat_ori[i].y = -0.707106781186547f;                    // y of quaternion
                    H->scat_ori[i].z = 0.0f;                                   // z of quaternion
                    H->scat_ori[i].w = 0.707106781186548f;                     // w of quaternion
#elif defined(QUAT_INIT_OTHER)
                    // Some other tests
                    H->scat_ori[i].x = 0.5f;                                   // x of quaternion
                    H->scat_ori[i].y = -0.5f;                                  // y of quaternion
                    H->scat_ori[i].z = 0.5f;                                   // z of quaternion
                    H->scat_ori[i].w = 0.5f;                                   // w of quaternion
#elif defined(QUAT_INIT_ROTATE_THETA)
                    // Rotate by theta
                    float theta = -70.0f / 180.0f * M_PI;
                    H->scat_ori[i].x = 0.0f;
                    H->scat_ori[i].y = sinf(0.5f * theta);
                    H->scat_ori[i].z = 0.0f;
                    H->scat_ori[i].w = cosf(0.5f * theta);
#endif
                    // Facing the beam
                    // NOTE(review): these assignments unconditionally overwrite
                    // the reference / QUAT_INIT_* orientations above — confirm
                    // that the macro branches are intentionally dead.
                    H->scat_ori[i].x = 0.5f;                                   // x of quaternion
                    H->scat_ori[i].y = -0.5f;                                  // y of quaternion
                    H->scat_ori[i].z = -0.5f;                                  // z of quaternion
                    H->scat_ori[i].w = 0.5f;                                   // w of quaternion
                    // Tumbling vector for orientation update
                    H->scat_tum[i].x = 0.0f;                                   // x of quaternion
                    H->scat_tum[i].y = 0.0f;                                   // y of quaternion
                    H->scat_tum[i].z = 0.0f;                                   // z of quaternion
                    H->scat_tum[i].w = 1.0f;                                   // w of quaternion
                    // Initial return from each point
                    H->scat_rcs[i].s0 = 1.0f;                                  // sh_real of rcs
                    H->scat_rcs[i].s1 = 0.0f;                                  // sh_imag of rcs
                    H->scat_rcs[i].s2 = 1.0f;                                  // sv_real of rcs
                    H->scat_rcs[i].s3 = 0.0f;                                  // sv_imag of rcs
                    // Random seeds
                    H->scat_rnd[i].s0 = rand();                                // random seed
                    H->scat_rnd[i].s1 = rand();                                // random seed
                    H->scat_rnd[i].s2 = rand();                                // random seed
                    H->scat_rnd[i].s3 = rand();                                // random seed
                    i++;
                }
            } // for (w = 0; w < H->num_workers; w++) ...
        } // for (k = 0; k < H->num_types; k++) ...
        // Volume of the simulation domain (m^3)
        float vol = H->sim_desc.s[RSSimulationDescriptionBoundSizeX] * H->sim_desc.s[RSSimulationDescriptionBoundSizeY] * H->sim_desc.s[RSSimulationDescriptionBoundSizeZ];
        // Re-initialize random seed
        srand(H->random_seed + H->random_seed);
        // Parameterized drop radius as scat_pos.w if DSD has been set
        // May want to add maximum relaxation time of each drop size
        // Potential places: vel.w, aux.s2
        float a;
        int bin;
        if (H->dsd_name != RSDropSizeDistributionUndefined) {
            // Each scatterer represents this many physical drops
            float drops_per_scat = (vol * H->dsd_nd_sum) / H->counts[0];
            sprintf(H->summary + strlen(H->summary), "Drops / scatterer = %s (%s / %s)\n", commafloat(drops_per_scat), commafloat((vol * H->dsd_nd_sum)), commaint(H->counts[0]));
            rsprint("Drops / scatterer = %s (%s / %s)\n", commafloat(drops_per_scat), commafloat((vol * H->dsd_nd_sum)), commaint(H->counts[0]));
            // Store a copy of concentration scale in simulation description
            H->sim_desc.s[RSSimulationDescriptionDropConcentrationScale] = sqrt(drops_per_scat);
            if (H->sim_concept & RSSimulationConceptUniformDSDScaledRCS) {
                // Uniform draw across DSD bins; the RCS is scaled elsewhere
                for (w = 0; w < H->num_workers; w++) {
                    i = (int)(H->offset[w] + H->workers[w].origins[0]);
                    for (n = 0; n < H->workers[w].counts[0]; n++) {
                        a = (float)rand() / RAND_MAX;
                        bin = (int)(a * (float)H->dsd_count);
                        H->dsd_pop[bin]++;
                        H->scat_pos[i].w = H->dsd_r[bin];                      // set the drop radius
                        H->scat_aux[i].s2 = ((float)bin + 0.5f) / (float)(H->dsd_count); // set the dsd bin index (temporary)
                        i++;
                    }
                }
            } else {
                // Inverse-transform sampling: walk the CDF from the top to
                // find the bin whose cumulative probability brackets `a`
                for (w = 0; w < H->num_workers; w++) {
                    i = (int)(H->offset[w] + H->workers[w].origins[0]);
                    for (n = 0; n < H->workers[w].counts[0]; n++) {
                        a = (float)rand() / RAND_MAX;
                        k = H->dsd_count;
                        bin = 0;
                        while (k > 0) {
                            k--;
                            if (a >= H->dsd_cdf[k]) {
                                bin = k;
                                break;
                            }
                        }
                        H->dsd_pop[bin]++;
                        H->scat_pos[i].w = H->dsd_r[bin];                      // set the drop radius
                        H->scat_aux[i].s2 = ((float)bin + 0.5f) / (float)(H->dsd_count); // set the dsd bin index
                        i++;
                    }
                }
#if defined(DEBUG_DSD)
                // Replace a few for debugging purpose
                H->scat_pos[0].w = 0.0025f;
                H->scat_pos[1].w = 0.001f;
                H->scat_pos[2].w = 0.0005f;
#endif
            }
            // Summarize the realized DSD: first 3 bins, an ellipsis, last bins
            sprintf(H->summary + strlen(H->summary),
                    "DSD specifications:\n");
            for (i = 0; i < MIN(H->dsd_count - 2, 3); i++) {
                sprintf(H->summary + strlen(H->summary), " o %.2f mm - P %.5f / %s particles\n", 2000.0f * H->dsd_r[i], (float)H->dsd_pop[i] / (float)H->counts[0], commaint(H->dsd_pop[i]));
            }
            if (H->dsd_count > 8) {
                sprintf(H->summary + strlen(H->summary), " o : - : / : /\n");
                sprintf(H->summary + strlen(H->summary), " o : - : / : /\n");
                // NOTE(review): this prints only the very last bin after the
                // ellipsis; confirm whether dsd_count - 3 was intended.
                i = MAX(4, H->dsd_count - 1);
            }
            for (; i < H->dsd_count; i++) {
                sprintf(H->summary + strlen(H->summary), " o %.2f mm - P %.5f / %s particles\n", 2000.0f * H->dsd_r[i], (float)H->dsd_pop[i] / (float)H->counts[0], commaint(H->dsd_pop[i]));
            }
            if (H->verb) {
                rsprint("Actual DSD specifications:");
                for (i = 0; i < MIN(H->dsd_count - 2, 3); i++) {
                    printf(RS_INDENT "o %.2f mm - PDF %.5f / %.5f / %s particles\n", 2000.0f * H->dsd_r[i], H->dsd_pdf[i], (float)H->dsd_pop[i] / (float)H->counts[0], commaint(H->dsd_pop[i]));
                }
                if (H->dsd_count > 8) {
                    printf(RS_INDENT "o : - : / : /\n");
                    printf(RS_INDENT "o : - : / : /\n");
                    i = MAX(4, H->dsd_count - 1);
                }
                for (; i < H->dsd_count; i++) {
                    printf(RS_INDENT "o %.2f mm - PDF %.5f / %.5f / %s particles\n", 2000.0f * H->dsd_r[i], H->dsd_pdf[i], (float)H->dsd_pop[i] / (float)H->counts[0], commaint(H->dsd_pop[i]));
                }
            }
        } else {
            rsprint("INFO: No DSD specified. The meteorological scatterers do not return any power.");
            float drops_per_scat = (vol * 1000.0f) / H->counts[0];
            sprintf(H->summary + strlen(H->summary), "Drops / scatterer = %s (%s / %s)\n", commafloat(drops_per_scat), commafloat((vol * H->dsd_nd_sum)), commaint(H->counts[0]));
            rsprint("Drops / scatterer = %s (%s / %s)\n", commafloat(drops_per_scat), commafloat((vol * H->dsd_nd_sum)), commaint(H->counts[0]));
        }
    } // if (H->sim_concept & RSSimulationConceptFixedScattererPosition) ...
#if defined(DEBUG_RCS)
    // Replace a few points for debugging purpose.
    H->scat_pos[0].x = domain.origin.x + 0.5f * domain.size.x;
    H->scat_pos[0].y = domain.origin.y + 0.5f * domain.size.y;
    H->scat_pos[0].z = H->scat_pos[0].y * tanf(5.0f / 180.0f * M_PI);
#endif
#if defined(DEBUG_DEBRIS)
    // Replace the very first debris particle
    if (H->counts[1] > 0) {
        k = (int)H->counts[0];
        //printf("k = %d\n", k);
        H->scat_pos[k].x = 0.0f;
        H->scat_pos[k].y = H->params.range_start + floorf(H->params.range_count * 0.5f) * H->params.range_delta;
        H->scat_pos[k].z = 0.5f * domain.size.z;
        H->scat_aux[k].s0 = H->params.range_start + floorf(H->params.range_count * 0.5f) * H->params.range_delta;
    }
#endif
    // Restore simulation time, default beam position at unit vector (0, 1, 0)
    H->sim_tic = 0.0f;
    H->sim_toc = H->vel_desc.tp;
    H->sim_desc.s[RSSimulationDescriptionBeamUnitX] = 0.0f;
    H->sim_desc.s[RSSimulationDescriptionBeamUnitY] = 1.0f;
    H->sim_desc.s[RSSimulationDescriptionBeamUnitZ] = 0.0f;
    H->sim_desc.s[RSSimulationDescriptionTotalParticles] = H->num_scats;
    // Make a copy in float so we are maintaining all 32-bits
    float tmpf; memcpy(&tmpf, &H->sim_concept, sizeof(float));
    H->sim_desc.s[RSSimulationDescriptionConcept] = tmpf;
    // Propagate / duplicate some constants to other places for efficient kernel execution
    for (i = 0; i < H->num_workers; i++) {
        H->workers[i].range_weight_desc.s[RSTable1DDescriptionUserConstant] = H->sim_desc.s[RSSimulationDescriptionWaveNumber];
        H->workers[i].rcs_ellipsoid_desc.s[RSTable1DDescriptionUserConstant] = H->sim_desc.s[RSSimulationDescriptionDropConcentrationScale];
    }
    // All tables must be ready at this point
    // - range weight table
    // - antenna weight table
    // - RCS of ellipsoid table
    // - RCS of debris table
    // - ADM of debris table
    // - 3D wind table
    // - Flux table
    RS_compute_rcs_ellipsoids(H);
    //
    // GPU memory allocation (probably should rename this to RS_worker_kernel_setup()
    //
    for (i = 0; i < H->num_workers; i++) {
        RS_worker_malloc(H, i);
    }
#if defined (_USE_GCL_)
    CGLContextObj cgl_context = CGLGetCurrentContext();
    if (cgl_context == NULL) {
        rsprint("ERROR: No GL context yet.");
        return;
    }
    CGLShareGroupObj sharegroup = CGLGetShareGroup(cgl_context);
    if (sharegroup == NULL) {
        rsprint("ERROR: Sharegroup should have been set before.");
        return;
    }
    rsprint("RS_populate() - context %p sharegroup %p", cgl_context, sharegroup);
    gcl_gl_set_sharegroup(sharegroup);
    rsprint("RS_populate() - gcl_gl_set_sharegroup() ... done");
    RS_derive_ndranges(H);
#endif
    // Upload the particle parameters to the GPU
    RS_upload(H);
    if (H->verb) {
        rsprint("ADM / RCS count = %d / %d", H->workers[0].adm_count, H->workers[0].rcs_count);
        rsprint("CL domain synchronized.");
    }
    H->status |= RSStatusDomainPopulated;
    // Set initial scan position to be the very first position
    RS_advance_beam(H);
    // Advance time with 0 s so that all attributes kernels (bg_atts, fp_atts, el_atts, or db_atts) are called once but positions aren't updated.
    H->sim_desc.s[RSSimulationDescriptionPRT] = 0.0f;
    RS_advance_time(H);
    RS_make_pulse(H);
    if (H->verb > 2) {
        RS_download(H);
        RS_show_scat_att(H);
    }
    // Restore the real PRT for subsequent RS_advance_time() calls
    H->sim_desc.s[RSSimulationDescriptionPRT] = H->params.prt;
    // Now we undo that sim_tic counter due to RS_advance_time()
    H->sim_tic -= H->params.prt;
    H->sim_desc.s[RSSimulationDescriptionSimTic] = H->sim_tic;
    return;
}
// Copy all per-scatterer attributes (position, velocity, orientation,
// auxiliary, RCS, signal) plus each worker's pulse buffer from the GPU
// back to host memory, then merge the per-worker pulses into H->pulse.
void RS_download(RSHandle *H) {
    int i;
#if defined (_USE_GCL_)
    //printf("%p <-----------------------\n", H->scat_ori);
    for (i = 0; i < H->num_workers; i++) {
        dispatch_async(H->workers[i].que, ^{
            // Each worker copies into its own disjoint segment of the host
            // arrays, shifted by H->offset[i].
            gcl_memcpy(H->scat_pos + H->offset[i], H->workers[i].scat_pos, H->workers[i].num_scats * sizeof(cl_float4));
            gcl_memcpy(H->scat_vel + H->offset[i], H->workers[i].scat_vel, H->workers[i].num_scats * sizeof(cl_float4));
            gcl_memcpy(H->scat_ori + H->offset[i], H->workers[i].scat_ori, H->workers[i].num_scats * sizeof(cl_float4));
            gcl_memcpy(H->scat_aux + H->offset[i], H->workers[i].scat_aux, H->workers[i].num_scats * sizeof(cl_float4));
            gcl_memcpy(H->scat_rcs + H->offset[i], H->workers[i].scat_rcs, H->workers[i].num_scats * sizeof(cl_float4));
            gcl_memcpy(H->scat_sig + H->offset[i], H->workers[i].scat_sig, H->workers[i].num_scats * sizeof(cl_float4));
            gcl_memcpy(H->pulse_tmp[i], H->workers[i].pulse, H->params.range_count * sizeof(cl_float4));
            dispatch_semaphore_signal(H->workers[i].sem);
        });
        dispatch_semaphore_wait(H->workers[i].sem, DISPATCH_TIME_FOREVER);
    }
#else
    int k;
    // 7 read events per worker: 6 scatterer attributes + 1 pulse buffer
    cl_event events[H->num_workers][7];
    // Non-blocking read, wait for events later when they are all queued up.
    for (i = 0; i < H->num_workers; i++) {
        clEnqueueReadBuffer(H->workers[i].que, H->workers[i].scat_pos, CL_FALSE, 0, H->workers[i].num_scats * sizeof(cl_float4), H->scat_pos + H->offset[i], 0, NULL, &events[i][0]);
        clEnqueueReadBuffer(H->workers[i].que, H->workers[i].scat_vel, CL_FALSE, 0, H->workers[i].num_scats * sizeof(cl_float4), H->scat_vel + H->offset[i], 0, NULL, &events[i][1]);
        clEnqueueReadBuffer(H->workers[i].que, H->workers[i].scat_ori, CL_FALSE, 0, H->workers[i].num_scats * sizeof(cl_float4), H->scat_ori + H->offset[i], 0, NULL, &events[i][2]);
        clEnqueueReadBuffer(H->workers[i].que, H->workers[i].scat_aux, CL_FALSE, 0, H->workers[i].num_scats * sizeof(cl_float4), H->scat_aux + H->offset[i], 0, NULL, &events[i][3]);
        clEnqueueReadBuffer(H->workers[i].que, H->workers[i].scat_rcs, CL_FALSE, 0, H->workers[i].num_scats * sizeof(cl_float4), H->scat_rcs + H->offset[i], 0, NULL, &events[i][4]);
        clEnqueueReadBuffer(H->workers[i].que, H->workers[i].scat_sig, CL_FALSE, 0, H->workers[i].num_scats * sizeof(cl_float4), H->scat_sig + H->offset[i], 0, NULL, &events[i][5]);
        clEnqueueReadBuffer(H->workers[i].que, H->workers[i].pulse, CL_FALSE, 0, H->params.range_count * sizeof(cl_float4), H->pulse_tmp[i], 0, NULL, &events[i][6]);
    }
    cl_int ret;
    for (i = 0; i < H->num_workers; i++) {
        // Wait for all 7 transfers of this worker, then release the events
        ret = clWaitForEvents(7, events[i]);
        if (ret != CL_SUCCESS) {
            rsprint("ERROR: Unable to properly read back the values.");
        }
        for (k = 0; k < 7; k++) {
            clReleaseEvent(events[i][k]);
        }
    }
#endif
    // Combine the per-worker pulse buffers into H->pulse (also applies gain)
    RS_merge_pulse_tmp(H);
}
// Read back only the scatterer positions from every worker into the
// host-side array (blocking until the transfers complete).
void RS_download_position_only(RSHandle *H) {
    int w;
#if defined (_USE_GCL_)
    for (w = 0; w < H->num_workers; w++) {
        dispatch_async(H->workers[w].que, ^{
            gcl_memcpy(H->scat_pos + H->offset[w], H->workers[w].scat_pos, H->workers[w].num_scats * sizeof(cl_float4));
            dispatch_semaphore_signal(H->workers[w].sem);
        });
        dispatch_semaphore_wait(H->workers[w].sem, DISPATCH_TIME_FOREVER);
    }
#else
    for (w = 0; w < H->num_workers; w++) {
        RSWorker *C = &H->workers[w];
        // Blocking read: CL_TRUE means the call returns only after the copy
        clEnqueueReadBuffer(C->que, C->scat_pos, CL_TRUE, 0, C->num_scats * sizeof(cl_float4), H->scat_pos + H->offset[w], 0, NULL, NULL);
    }
#endif
}
// Read back only the scatterer orientation quaternions from every worker
// into the host-side array (blocking until the transfers complete).
void RS_download_orientation_only(RSHandle *H) {
    int w;
#if defined (_USE_GCL_)
    for (w = 0; w < H->num_workers; w++) {
        dispatch_async(H->workers[w].que, ^{
            gcl_memcpy(H->scat_ori + H->offset[w], H->workers[w].scat_ori, H->workers[w].num_scats * sizeof(cl_float4));
            dispatch_semaphore_signal(H->workers[w].sem);
        });
        dispatch_semaphore_wait(H->workers[w].sem, DISPATCH_TIME_FOREVER);
    }
#else
    for (w = 0; w < H->num_workers; w++) {
        RSWorker *C = &H->workers[w];
        // Blocking read: CL_TRUE means the call returns only after the copy
        clEnqueueReadBuffer(C->que, C->scat_ori, CL_TRUE, 0, C->num_scats * sizeof(cl_float4), H->scat_ori + H->offset[w], 0, NULL, NULL);
    }
#endif
}
//
// Combine the per-worker partial range profiles in H->pulse_tmp[] into the
// final H->pulse array, then apply the system amplitude gain.
//
void RS_merge_pulse_tmp(RSHandle *H) {
    int w, r;
    // Seed the result with worker 0's contribution, then accumulate the rest.
    memcpy(H->pulse, H->pulse_tmp[0], H->params.range_count * sizeof(cl_float4));
    for (w = 1; w < H->num_workers; w++) {
        for (r = 0; r < H->params.range_count; r++) {
            H->pulse[r].s0 += H->pulse_tmp[w][r].s0;
            H->pulse[r].s1 += H->pulse_tmp[w][r].s1;
            H->pulse[r].s2 += H->pulse_tmp[w][r].s2;
            H->pulse[r].s3 += H->pulse_tmp[w][r].s3;
        }
    }
    //
    // Scale the amplitude by antenna gain, tx power
    // Amplitude scaling, Ga = 10 ^ (Gt / 20) * 10 ^ (Gr / 20) * sqrt(Pt)
    // For dish antennas: Gt = Gr
    //
    // => g = 10 ^ (G / 20) * 10 ^ (G / 20) * sqrt(Pt)
    //      = 10 ^ (G / 10) * sqrt(Pt)
    //
    // Amplitude scale to 1-km referece: sqrt(R ^ 4) = R ^ 2 = 1.0e6
    //
    const float g = powf(10.0f, 0.1f * H->params.antenna_gain_dbi) * sqrtf(H->params.tx_power_watt) / (4.0f * M_PI) * 1.0e6f;
    //printf("** g = %.4e (linear unit)\n", g);
    for (r = 0; r < H->params.range_count; r++) {
        H->pulse[r].s0 *= g;
        H->pulse[r].s1 *= g;
        H->pulse[r].s2 *= g;
        H->pulse[r].s3 *= g;
    }
}
//
// Fetch each worker's partial pulse buffer into H->pulse_tmp[], then merge
// the partial sums into H->pulse.
//
void RS_download_pulse_only(RSHandle *H) {
    int w;
    #if defined (_USE_GCL_)
    // Kick off all copies first, then wait for every semaphore.
    for (w = 0; w < H->num_workers; w++) {
        dispatch_async(H->workers[w].que, ^{
            gcl_memcpy(H->pulse_tmp[w], H->workers[w].pulse, H->params.range_count * sizeof(cl_float4));
            dispatch_semaphore_signal(H->workers[w].sem);
        });
    }
    for (w = 0; w < H->num_workers; w++) {
        dispatch_semaphore_wait(H->workers[w].sem, DISPATCH_TIME_FOREVER);
    }
    #else
    // Blocking read since there is only one read
    for (w = 0; w < H->num_workers; w++) {
        RSWorker *C = &H->workers[w];
        clEnqueueReadBuffer(C->que, C->pulse, CL_TRUE, 0, H->params.range_count * sizeof(cl_float4), H->pulse_tmp[w], 0, NULL, NULL);
    }
    #endif
    RS_merge_pulse_tmp(H);
}
//
// Push all scatterer attribute arrays from the host to every worker device.
// Each worker receives its own slice, selected by H->offset[w]. Aborts when
// there are no scatterers to transfer.
//
void RS_upload(RSHandle *H) {
    int w;
    if (H->verb > 3) {
        for (w = 0; w < H->num_workers; w++) {
            rsprint("workers[%d].scat_pos @ %p\n", w, H->workers[w].scat_pos);
            rsprint("workers[%d].scat_vel @ %p\n", w, H->workers[w].scat_vel);
        }
        rsprint("scat_pos @ %p\n", H->scat_pos);
        rsprint("scat_vel @ %p\n", H->scat_vel);
    }
    if (H->num_scats == 0) {
        rsprint("Abort @ num_scats = 0 during RS_upload()\n");
        return;
    }
    #if defined (_USE_GCL_)
    for (w = 0; w < H->num_workers; w++) {
        dispatch_async(H->workers[w].que, ^{
            gcl_memcpy(H->workers[w].scat_pos, H->scat_pos + H->offset[w], H->workers[w].num_scats * sizeof(cl_float4));
            gcl_memcpy(H->workers[w].scat_vel, H->scat_vel + H->offset[w], H->workers[w].num_scats * sizeof(cl_float4));
            gcl_memcpy(H->workers[w].scat_ori, H->scat_ori + H->offset[w], H->workers[w].num_scats * sizeof(cl_float4));
            gcl_memcpy(H->workers[w].scat_tum, H->scat_tum + H->offset[w], H->workers[w].num_scats * sizeof(cl_float4));
            gcl_memcpy(H->workers[w].scat_aux, H->scat_aux + H->offset[w], H->workers[w].num_scats * sizeof(cl_float4));
            gcl_memcpy(H->workers[w].scat_rcs, H->scat_rcs + H->offset[w], H->workers[w].num_scats * sizeof(cl_float4));
            gcl_memcpy(H->workers[w].scat_sig, H->scat_sig + H->offset[w], H->workers[w].num_scats * sizeof(cl_float4));
            gcl_memcpy(H->workers[w].scat_rnd, H->scat_rnd + H->offset[w], H->workers[w].num_scats * sizeof(cl_uint4));
            dispatch_semaphore_signal(H->workers[w].sem);
        });
        dispatch_semaphore_wait(H->workers[w].sem, DISPATCH_TIME_FOREVER);
    }
    #else
    // Blocking write since there is no need to optimize this too much
    for (w = 0; w < H->num_workers; w++) {
        RSWorker *C = &H->workers[w];
        const size_t nbytes = C->num_scats * sizeof(cl_float4);
        clEnqueueWriteBuffer(C->que, C->scat_pos, CL_TRUE, 0, nbytes, H->scat_pos + H->offset[w], 0, NULL, NULL);
        clEnqueueWriteBuffer(C->que, C->scat_vel, CL_TRUE, 0, nbytes, H->scat_vel + H->offset[w], 0, NULL, NULL);
        clEnqueueWriteBuffer(C->que, C->scat_ori, CL_TRUE, 0, nbytes, H->scat_ori + H->offset[w], 0, NULL, NULL);
        clEnqueueWriteBuffer(C->que, C->scat_tum, CL_TRUE, 0, nbytes, H->scat_tum + H->offset[w], 0, NULL, NULL);
        clEnqueueWriteBuffer(C->que, C->scat_aux, CL_TRUE, 0, nbytes, H->scat_aux + H->offset[w], 0, NULL, NULL);
        clEnqueueWriteBuffer(C->que, C->scat_rcs, CL_TRUE, 0, nbytes, H->scat_rcs + H->offset[w], 0, NULL, NULL);
        clEnqueueWriteBuffer(C->que, C->scat_sig, CL_TRUE, 0, nbytes, H->scat_sig + H->offset[w], 0, NULL, NULL);
        clEnqueueWriteBuffer(C->que, C->scat_rnd, CL_TRUE, 0, C->num_scats * sizeof(cl_uint4), H->scat_rnd + H->offset[w], 0, NULL, NULL);
    }
    #endif
}
//
// Advance the simulation state by one PRT: stage the next LES wind frame if
// the table period has elapsed, then launch the scatterer attribute kernels
// (one background/ellipsoid/fixed-position kernel, plus one debris kernel per
// populated debris type) on every worker and wait for completion.
//
void RS_advance_time(RSHandle *H) {
    int i, k;
    // r / a cycle round-robin through the available RCS / ADM tables
    int r, a;
    if (!(H->status & RSStatusDomainPopulated)) {
        rsprint("ERROR: Simulation domain not yet populated.");
        return;
    }
    // Advance to next wind table when the time comes
    if (H->sim_tic >= H->sim_toc) {
        H->sim_toc += H->vel_desc.tp;
        if (H->vel_idx == 0) {
            rsprint("Wind table restarted.");
        }
        // Ping-pong each worker's LES table slot so the new frame does not
        // overwrite the one the kernels may still reference.
        for (i = 0; i < H->num_workers; i++) {
            H->workers[i].les_id = H->workers[i].les_id == 1 ? 0 : 1;
        }
        RS_set_vel_data_to_LES_table(H, LES_get_frame(H->L, H->vel_idx));
        // Wrap around to the first frame after the last one
        H->vel_idx = H->vel_idx == H->vel_count - 1 ? 0 : H->vel_idx + 1;
        if (H->verb > 2) {
            rsprint("Wind table advanced. vel_idx = %d ( tp = %.2f / prt = %.4f ) vel_id = %d", H->vel_idx, H->vel_desc.tp, H->params.prt, H->workers[0].les_id);
        }
    }
#if defined (_USE_GCL_)
#if defined (_DUMMY_)
    // Debug path: run only the dummy kernel on worker 0.
    i = 0;
    k = 0;
    r = 0;
    a = 0;
    dispatch_async(H->workers[i].que, ^{
        dummy_kernel(&H->workers[i].ndrange_scat_all,
                     (cl_float4 *)H->workers[i].scat_pos,
                     (cl_float4 *)H->workers[i].scat_ori,
                     (cl_float4 *)H->workers[i].scat_rcs,
                     (cl_image)H->workers[i].rcs_real[r],
                     (cl_image)H->workers[i].rcs_imag[r],
                     H->workers[i].rcs_desc[r],
                     H->sim_desc);
        dispatch_semaphore_signal(H->workers[i].sem);
    });
    dispatch_semaphore_wait(H->workers[i].sem, DISPATCH_TIME_FOREVER);
#elif defined (_ALL_DEBRIS_)
    // Debug path: treat every scatterer as debris on worker 0.
    i = 0;
    r = 0;
    a = 0;
    dispatch_async(H->workers[i].que, ^{
        db_atts_kernel(&H->workers[i].ndrange_scat_all,
                       (cl_float4 *)H->workers[i].scat_pos,
                       (cl_float4 *)H->workers[i].scat_ori,
                       (cl_float4 *)H->workers[i].scat_vel,
                       (cl_float4 *)H->workers[i].scat_tum,
                       (cl_float4 *)H->workers[i].scat_sig,
                       (cl_uint4 *)H->workers[i].scat_rnd,
                       (cl_image)H->workers[i].vel[H->workers[i].vel_id],
                       H->workers[i].vel_desc,
                       (cl_image)H->workers[i].adm_cd[a],
                       (cl_image)H->workers[i].adm_cm[a],
                       H->workers[i].adm_desc[a],
                       (cl_image)H->workers[i].rcs_real[r],
                       (cl_image)H->workers[i].rcs_imag[r],
                       H->workers[i].rcs_desc[r],
                       H->sim_desc);
        dispatch_semaphore_signal(H->workers[i].sem);
    });
    dispatch_semaphore_wait(H->workers[i].sem, DISPATCH_TIME_FOREVER);
#else
    // These kernels are actually independent and, thus, can be parallelized.
    for (i = 0; i < H->num_workers; i++) {
        // Background population: elliptical (dragged) or plain attributes.
        dispatch_async(H->workers[i].que, ^{
            if (H->sim_concept & RSSimulationConceptDraggedBackground) {
                el_atts_kernel(&H->workers[i].ndrange_scat[0],
                               (cl_float4 *)H->workers[i].scat_pos,
                               (cl_float4 *)H->workers[i].scat_vel,
                               (cl_float4 *)H->workers[i].scat_rcs,
                               (cl_uint4 *)H->workers[i].scat_rnd,
                               (cl_image)H->workers[i].les_uvwt[H->workers[i].les_id],
                               (cl_image)H->workers[i].les_cpxx[H->workers[i].les_id],
                               H->workers[i].les_desc,
                               (cl_float4 *)H->workers[i].rcs_ellipsoid,
                               H->workers[i].rcs_ellipsoid_desc,
                               H->sim_desc);
            } else {
                bg_atts_kernel(&H->workers[i].ndrange_scat[0],
                               (cl_float4 *)H->workers[i].scat_pos,
                               (cl_float4 *)H->workers[i].scat_vel,
                               (cl_float4 *)H->workers[i].scat_rcs,
                               (cl_uint4 *)H->workers[i].scat_rnd,
                               (cl_image)H->workers[i].les_uvwt[H->workers[i].les_id],
                               (cl_image)H->workers[i].les_cpxx[H->workers[i].les_id],
                               H->workers[i].les_desc,
                               (cl_float4 *)H->workers[i].rcs_ellipsoid,
                               H->workers[i].rcs_ellipsoid_desc,
                               H->sim_desc);
            }
            dispatch_semaphore_signal(H->workers[i].sem);
        });
        // Debris populations: one launch per non-empty type; each type takes
        // the next ADM / RCS table in round-robin order.
        r = 0;
        a = 0;
        for (k = 1; k < RS_MAX_DEBRIS_TYPES; k++) {
            if (H->workers[i].counts[k]) {
                dispatch_async(H->workers[i].que, ^{
                    db_atts_kernel(&H->workers[i].ndrange_scat[k],
                                   (cl_float4 *)H->workers[i].scat_pos,
                                   (cl_float4 *)H->workers[i].scat_ori,
                                   (cl_float4 *)H->workers[i].scat_vel,
                                   (cl_float4 *)H->workers[i].scat_tum,
                                   (cl_float4 *)H->workers[i].scat_rcs,
                                   (cl_uint4 *)H->workers[i].scat_rnd,
                                   (cl_image)H->workers[i].les_uvwt[H->workers[i].les_id],
                                   H->workers[i].les_desc,
                                   (cl_image)H->workers[i].adm_cd[a],
                                   (cl_image)H->workers[i].adm_cm[a],
                                   H->workers[i].adm_desc[a],
                                   (cl_image)H->workers[i].rcs_real[r],
                                   (cl_image)H->workers[i].rcs_imag[r],
                                   H->workers[i].rcs_desc[r],
                                   H->sim_desc);
                    dispatch_semaphore_signal(H->workers[i].sem);
                });
            }
            r = r == H->workers[i].rcs_count - 1 ? 0 : r + 1;
            a = a == H->workers[i].adm_count - 1 ? 0 : a + 1;
        }
    }
    // Wait once for the background launch and once per debris launch --
    // the semaphore signal count must match the dispatch count above.
    for (i = 0; i < H->num_workers; i++) {
        dispatch_semaphore_wait(H->workers[i].sem, DISPATCH_TIME_FOREVER);
        for (k = 1; k < RS_MAX_DEBRIS_TYPES; k++) {
            if (H->workers[i].counts[k]) {
                dispatch_semaphore_wait(H->workers[i].sem, DISPATCH_TIME_FOREVER);
            }
        }
    }
#endif
#else
    // OpenCL path: one event per (worker, scatterer type) launch.
    cl_event events[RS_MAX_GPU_DEVICE][H->num_types];
    memset(events, 0, sizeof(events));
    for (i = 0; i < H->num_workers; i++) {
        r = 0;
        a = 0;
        // A convenient pointer to reduce dereferencing
        RSWorker *C = &H->workers[i];
        // Need to refresh some parameters of the background at each time update
        if (H->sim_concept & RSSimulationConceptDraggedBackground) {
            clSetKernelArg(C->kern_el_atts, RSBackgroundAttributeKernelArgumentBackgroundVelocity, sizeof(cl_mem), &C->les_uvwt[C->les_id]);
            clSetKernelArg(C->kern_el_atts, RSBackgroundAttributeKernelArgumentBackgroundCn2Pressure, sizeof(cl_mem), &C->les_cpxx[C->les_id]);
            clSetKernelArg(C->kern_el_atts, RSBackgroundAttributeKernelArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
            clEnqueueNDRangeKernel(C->que, C->kern_el_atts, 1, &C->origins[0], &C->counts[0], NULL, 0, NULL, &events[i][0]);
        } else if (H->sim_concept & RSSimulationConceptFixedScattererPosition) {
            clSetKernelArg(C->kern_fp_atts, RSBackgroundAttributeKernelArgumentBackgroundVelocity, sizeof(cl_mem), &C->les_uvwt[C->les_id]);
            clSetKernelArg(C->kern_fp_atts, RSBackgroundAttributeKernelArgumentBackgroundCn2Pressure, sizeof(cl_mem), &C->les_cpxx[C->les_id]);
            clSetKernelArg(C->kern_fp_atts, RSBackgroundAttributeKernelArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
            clEnqueueNDRangeKernel(C->que, C->kern_fp_atts, 1, &C->origins[0], &C->counts[0], NULL, 0, NULL, &events[i][0]);
        } else {
            clSetKernelArg(C->kern_bg_atts, RSBackgroundAttributeKernelArgumentBackgroundVelocity, sizeof(cl_mem), &C->les_uvwt[C->les_id]);
            clSetKernelArg(C->kern_bg_atts, RSBackgroundAttributeKernelArgumentBackgroundCn2Pressure, sizeof(cl_mem), &C->les_cpxx[C->les_id]);
            clSetKernelArg(C->kern_bg_atts, RSBackgroundAttributeKernelArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
            clEnqueueNDRangeKernel(C->que, C->kern_bg_atts, 1, &C->origins[0], &C->counts[0], NULL, 0, NULL, &events[i][0]);
        }
        // Debris particles
        clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentBackgroundVelocity, sizeof(cl_mem), &C->les_uvwt[C->les_id]);
        clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentBackgroundCn2Pressure, sizeof(cl_mem), &C->les_cpxx[C->les_id]);
        clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentDebrisFluxField, sizeof(cl_mem), &C->dff_icdf[C->les_id]);
        clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentDebrisFluxFieldDescription, sizeof(cl_float16), &C->dff_desc);
        clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
        // Launches below share kern_db_atts; re-binding the ADM / RCS args
        // per type is safe because clEnqueueNDRangeKernel captures them.
        for (k = 1; k < H->num_types; k++) {
            if (C->counts[k]) {
                clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentAirDragModelDrag, sizeof(cl_mem), &C->adm_cd[a]);
                clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentAirDragModelMomentum, sizeof(cl_mem), &C->adm_cm[a]);
                clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentAirDragModelDescription, sizeof(cl_float16), &C->adm_desc[a]);
                clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentRadarCrossSectionReal, sizeof(cl_mem), &C->rcs_real[r]);
                clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentRadarCrossSectionImag, sizeof(cl_mem), &C->rcs_imag[r]);
                clSetKernelArg(C->kern_db_atts, RSDebrisAttributeKernelArgumentRadarCrossSectionDescription, sizeof(cl_float16), &C->rcs_desc[r]);
                clEnqueueNDRangeKernel(C->que, C->kern_db_atts, 1, &C->origins[k], &C->counts[k], NULL, 0, NULL, &events[i][k]);
            }
            r = r == H->workers[i].rcs_count - 1 ? 0 : r + 1;
            a = a == H->workers[i].adm_count - 1 ? 0 : a + 1;
        }
    }
    // Flush every queue first so all devices run concurrently, then wait.
    for (i = 0; i < H->num_workers; i++) {
        clFlush(H->workers[i].que);
    }
    for (i = 0; i < H->num_workers; i++) {
        for (k = 0; k < H->num_types; k++) {
            if (H->workers[i].counts[k]) {
                clWaitForEvents(1, &events[i][k]);
                clReleaseEvent(events[i][k]);
            }
        }
    }
#endif
    // Bookkeeping: advance simulation time and flag the signal arrays stale.
    H->sim_tic += H->params.prt;
    H->sim_desc.s[RSSimulationDescriptionSimTic] = H->sim_tic;
    H->status |= RSStatusScattererSignalNeedsUpdate;
}
//
// Step the antenna to the next position of the scan pattern and point the
// simulated beam there.
//
void RS_advance_beam(RSHandle *H) {
    POS_get_next_angles(H->P);
    RS_set_beam_pos(H, H->P->az, H->P->el);
}
//
// Produce one pulse (range profile) from the current scatterer state:
// optionally refresh the debris RCS and the per-scatterer signal/aux arrays,
// then run the two-pass reduction (pass 1 bins scatterers into range gates,
// pass 2 reduces the per-group partial sums into C->pulse on each worker).
//
void RS_make_pulse(RSHandle *H) {
    int i, k;
    // r / a cycle round-robin through the available RCS / ADM tables
    int r, a;
    if (!(H->status & RSStatusDomainPopulated)) {
        rsprint("ERROR: Simulation domain not populated.");
        return;
    }
#if defined (_USE_GCL_)
    // Refresh the debris RCS only when flagged (e.g., after a beam change).
    if (H->status & RSStatusDebrisRCSNeedsUpdate) {
        for (i = 0; i < H->num_workers; i++) {
            r = 0;
            a = 0;
            RSWorker *C = &H->workers[i];
            for (k = 1; k < H->num_types; k++) {
                if (C->counts[k]) {
                    dispatch_async(C->que, ^{
                        db_rcs_kernel(&C->ndrange_scat[k],
                                      (cl_float4 *)C->scat_pos,
                                      (cl_float4 *)C->scat_ori,
                                      (cl_float4 *)C->scat_rcs,
                                      (cl_image)H->workers[i].rcs_real[r],
                                      (cl_image)H->workers[i].rcs_imag[r],
                                      H->workers[i].rcs_desc[r],
                                      H->sim_desc);
                        dispatch_semaphore_signal(C->sem);
                    });
                }
                r = r == H->workers[i].rcs_count - 1 ? 0 : r + 1;
                a = a == H->workers[i].adm_count - 1 ? 0 : a + 1;
            }
        }
        // One semaphore wait per debris launch issued above.
        for (i = 0; i < H->num_workers; i++) {
            RSWorker *C = &H->workers[i];
            for (k = 1; k < H->num_types; k++) {
                if (C->counts[k]) {
                    dispatch_semaphore_wait(C->sem, DISPATCH_TIME_FOREVER);
                }
            }
        }
        // Re-derive the signal & aux arrays from the freshly updated RCS.
        for (i = 0; i < H->num_workers; i++) {
            RSWorker *C = &H->workers[i];
            dispatch_async(C->que, ^{
                scat_sig_aux_kernel(&C->ndrange_scat_all,
                                    (cl_float4 *)C->scat_sig,
                                    (cl_float4 *)C->scat_aux,
                                    (cl_float4 *)C->scat_pos,
                                    (cl_float4 *)C->scat_rcs,
                                    (cl_float *)C->angular_weight,
                                    C->angular_weight_desc,
                                    H->sim_desc);
                dispatch_semaphore_signal(C->sem);
            });
        }
        for (i = 0; i < H->num_workers; i++) {
            dispatch_semaphore_wait(H->workers[i].sem, DISPATCH_TIME_FOREVER);
        }
    }
    // Main pulse pipeline: (optional) signal refresh, pass 1, then the pass-2
    // variant selected in RS_make_pulse_params().
    for (i = 0; i < H->num_workers; i++) {
        RSWorker *C = &H->workers[i];
        dispatch_async(C->que, ^{
            if (H->status & RSStatusScattererSignalNeedsUpdate) {
                //printf("RS_make_pulse: kern_scat_sig_aux\n");
                scat_sig_aux_kernel(&C->ndrange_scat_all,
                                    (cl_float4 *)C->scat_sig,
                                    (cl_float4 *)C->scat_aux,
                                    (cl_float4 *)C->scat_pos,
                                    (cl_float4 *)C->scat_rcs,
                                    (cl_float *)C->angular_weight,
                                    C->angular_weight_desc,
                                    H->sim_desc);
            }
            make_pulse_pass_1_kernel(&C->ndrange_pulse_pass_1,
                                     (cl_float4 *)C->work,
                                     (cl_float4 *)C->scat_sig,
                                     (cl_float4 *)C->scat_aux,
                                     C->make_pulse_params.local_mem_size[0],
                                     (cl_float *)C->range_weight,
                                     C->range_weight_desc,
                                     C->make_pulse_params.range_start,
                                     C->make_pulse_params.range_delta,
                                     C->make_pulse_params.range_count,
                                     C->make_pulse_params.group_counts[0],
                                     C->make_pulse_params.entry_counts[0]);
            switch (C->make_pulse_params.cl_pass_2_method) {
                case RS_CL_PASS_2_IN_LOCAL:
                    make_pulse_pass_2_local_kernel(&C->ndrange_pulse_pass_2,
                                                   (cl_float4 *)C->pulse,
                                                   (cl_float4 *)C->work,
                                                   C->make_pulse_params.local_mem_size[1],
                                                   C->make_pulse_params.range_count,
                                                   C->make_pulse_params.entry_counts[1]);
                    break;
                case RS_CL_PASS_2_IN_RANGE:
                    make_pulse_pass_2_range_kernel(&C->ndrange_pulse_pass_2,
                                                   (cl_float4 *)C->pulse,
                                                   (cl_float4 *)C->work,
                                                   C->make_pulse_params.local_mem_size[1],
                                                   C->make_pulse_params.range_count,
                                                   C->make_pulse_params.entry_counts[1]);
                    break;
                default:
                    make_pulse_pass_2_group_kernel(&C->ndrange_pulse_pass_2,
                                                   (cl_float4 *)C->pulse,
                                                   (cl_float4 *)C->work,
                                                   C->make_pulse_params.local_mem_size[1],
                                                   C->make_pulse_params.range_count,
                                                   C->make_pulse_params.entry_counts[1]);
                    break;
            }
            dispatch_semaphore_signal(C->sem);
        });
    }
    for (i = 0; i < H->num_workers; i++) {
        dispatch_semaphore_wait(H->workers[i].sem, DISPATCH_TIME_FOREVER);
    }
#else
    // OpenCL path: column 0/1/2 hold the sig-aux, pass-1 and pass-2 events;
    // columns >= 1 double as per-debris-type events during the RCS refresh.
    cl_event events[H->num_workers][MAX(H->num_types, 3)];
    memset(events, 0, sizeof(events));
    // In this implementation, kern_make_pulse_pass_2 should point to kern_make_pulse_pass_2_group, kern_make_pulse_pass_2_local or kern_make_pulse_pass_2_range,
    // which had been selected based on the group size in RS_make_pulse_params()
    if (H->status & RSStatusDebrisRCSNeedsUpdate) {
        // Update all the debris RCS
        for (i = 0; i < H->num_workers; i++) {
            r = 0;
            a = 0;
            RSWorker *C = &H->workers[i];
            for (k = 1; k < H->num_types; k++) {
                if (C->counts[k]) {
                    clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentRadarCrossSectionReal, sizeof(cl_mem), &C->rcs_real[r]);
                    clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentRadarCrossSectionImag, sizeof(cl_mem), &C->rcs_imag[r]);
                    clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentRadarCrossSectionDescription, sizeof(cl_float16), &C->rcs_desc[r]);
                    clSetKernelArg(C->kern_db_rcs, RSDebrisRCSKernelArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
                    clEnqueueNDRangeKernel(C->que, C->kern_db_rcs, 1, &C->origins[k], &C->counts[k], NULL, 0, NULL, &events[i][k]);
                }
                r = r == H->workers[i].rcs_count - 1 ? 0 : r + 1;
                a = a == H->workers[i].adm_count - 1 ? 0 : a + 1;
            }
        }
        for (i = 0; i < H->num_workers; i++) {
            clFlush(H->workers[i].que);
        }
        for (i = 0; i < H->num_workers; i++) {
            for (k = 1; k < H->num_types; k++) {
                if (H->workers[i].counts[k]) {
                    clWaitForEvents(1, &events[i][k]);
                    clReleaseEvent(events[i][k]);
                }
            }
        }
        // New RCS values invalidate the derived signal arrays.
        H->status |= RSStatusScattererSignalNeedsUpdate;
    }
    for (i = 0; i < H->num_workers; i++) {
        RSWorker *C = &H->workers[i];
        if (H->status & RSStatusScattererSignalNeedsUpdate) {
            //printf("RS_make_pulse() kern_scat_sig_aux : %zu sim_tic = %.4f\n", C->num_scats, H->sim_tic);
            clSetKernelArg(C->kern_scat_sig_aux, RSScattererAngularWeightKernalArgumentSimulationDescription, sizeof(cl_float16), &H->sim_desc);
            clEnqueueNDRangeKernel(C->que, C->kern_scat_sig_aux, 1, NULL, &C->num_scats, NULL, 0, NULL, &events[i][0]);
            clEnqueueNDRangeKernel(C->que, C->kern_make_pulse_pass_1, 1, NULL, &C->make_pulse_params.global[0], &C->make_pulse_params.local[0], 1, &events[i][0], &events[i][1]);
        } else {
            clEnqueueNDRangeKernel(C->que, C->kern_make_pulse_pass_1, 1, NULL, &C->make_pulse_params.global[0], &C->make_pulse_params.local[0], 0, NULL, &events[i][1]);
        }
        clEnqueueNDRangeKernel(C->que, C->kern_make_pulse_pass_2, 1, NULL, &C->make_pulse_params.global[1], &C->make_pulse_params.local[1], 1, &events[i][1], &events[i][2]);
    }
    for (i = 0; i < H->num_workers; i++) {
        clFlush(H->workers[i].que);
    }
    // Waiting on the pass-2 event is enough: it depends on pass 1, which in
    // turn depends on the sig-aux event when one was issued.
    for (i = 0; i < H->num_workers; i++) {
        clWaitForEvents(1, &events[i][2]);
        if (H->status & RSStatusScattererSignalNeedsUpdate)
            clReleaseEvent(events[i][0]);
        clReleaseEvent(events[i][1]);
        clReleaseEvent(events[i][2]);
    }
#endif
    H->status &= ~RSStatusDebrisRCSNeedsUpdate;
    H->status &= ~RSStatusScattererSignalNeedsUpdate;
}
#pragma mark -
#pragma mark Elements for table lookup
RSTable RS_table_init(size_t numel) {
RSTable table = {0.0f, 1.0f, 1.0f, 0, NULL};
if (posix_memalign((void **)&table.data, RS_ALIGN_SIZE, numel * sizeof(float))) {
rsprint("ERROR: Unable to allocate an RSTable->data.\n", now());
return table;
}
return table;
}
// Release the memory behind a 1-D table; a NULL data pointer is a no-op.
void RS_table_free(RSTable T) {
    if (T.data == NULL) {
        return;
    }
    free(T.data);
}
RSTable2D RS_table2d_init(size_t numel) {
RSTable2D table;
table.xs = 1.0f; table.ys = 1.0f;
table.xo = 0.0f; table.yo = 0.0f;
table.xm = 1.0f; table.ym = 1.0f;
if (posix_memalign((void **)&table.data, RS_ALIGN_SIZE, numel * sizeof(cl_float4))) {
rsprint("ERROR: Unable to allocate an RSTable2D->data.\n", now());
return table;
}
return table;
}
// Release the memory behind a 2-D table; a NULL data pointer is a no-op.
void RS_table2d_free(RSTable2D T) {
    if (T.data == NULL) {
        return;
    }
    free(T.data);
}
RSTable3D RS_table3d_init(size_t numel) {
RSTable3D table;
table.spacing = RSTableSpacingUniform;
table.xs = 1.0f; table.ys = 1.0f; table.zs = 1.0f;
table.xo = 0.0f; table.yo = 0.0f; table.zo = 0.0f;
table.xm = 1.0f; table.ym = 1.0f; table.zm = 1.0f;
if (posix_memalign((void **)&table.uvwt, RS_ALIGN_SIZE, numel * sizeof(cl_float4))) {
rsprint("ERROR: Unable to allocate an RSTable3D->uvwt.\n", now());
return table;
}
if (posix_memalign((void **)&table.cpxx, RS_ALIGN_SIZE, numel * sizeof(cl_float4))) {
rsprint("ERROR: Unable to allocate an RSTable3D->cpxx.\n", now());
return table;
}
return table;
}
// Release both buffers of a 3-D table; NULL pointers are skipped.
void RS_table3d_free(RSTable3D T) {
    if (T.uvwt) {
        free(T.uvwt);
        T.uvwt = NULL;
    }
    if (T.cpxx) {
        free(T.cpxx);
        T.cpxx = NULL;
    }
}
#pragma mark -
#pragma mark Display
// Print a one-glance summary of the radar parameters: beamwidth, gate
// spacing dr, wavelength, PRT and the aliasing velocity va.
void RS_show_radar_params(RSHandle *H) {
    rsprint("Radar Parameters:\n");
    printf(RS_INDENT "o beamwidth = %4.2f deg o dr = %.2f m\n", H->params.antenna_bw_deg, H->params.dr);
    printf(RS_INDENT "o lambda = %4.2f m o PRT = %.2f ms o va = %.2f m/s\n", H->params.lambda, H->params.prt * 1.0e3f, H->params.va);
}
// Print one scatterer's identity (uid), position p(), velocity v() and
// orientation quaternion o(). The 4th position component is printed as
// 2000 x w -- presumably a radius-to-diameter-in-mm conversion; confirm
// against the population kernels.
static void RS_show_scat_i(RSHandle *H, const size_t i) {
    printf(" [%7d %7d %5d %d] p( %9.2f, %9.2f, %9.2f, %4.1f ) v( %7.2f %7.2f %7.2f ) o( %7.4f %7.4f %7.4f %7.4f)\n",
           H->scat_uid[i].x, H->scat_uid[i].y, H->scat_uid[i].z, H->scat_uid[i].w,
           H->scat_pos[i].x, H->scat_pos[i].y, H->scat_pos[i].z, 2000.0f * H->scat_pos[i].w,
           H->scat_vel[i].x, H->scat_vel[i].y, H->scat_vel[i].z,
           H->scat_ori[i].x, H->scat_ori[i].y, H->scat_ori[i].z, H->scat_ori[i].w);
}
// Print one scatterer's signal s(), cross-section x(), plus r (from the
// first aux component) and d = 2000 x pos.w (diameter in mm, presumably --
// confirm against the population kernels).
static void RS_show_rcs_i(RSHandle *H, const size_t i) {
    printf(" [%7d %7d %5d %d] s( %10.3e, %10.3e, %10.3e, %10.3e ) x( %10.3e %10.3e %10.3e %10.3e ) r = %.2f m d = %.1f mm\n",
           H->scat_uid[i].x, H->scat_uid[i].y, H->scat_uid[i].z, H->scat_uid[i].w,
           H->scat_sig[i].x, H->scat_sig[i].y, H->scat_sig[i].z, H->scat_sig[i].w,
           H->scat_rcs[i].x, H->scat_rcs[i].y, H->scat_rcs[i].z, H->scat_rcs[i].w,
           H->scat_aux[i].s0, 2000.0f * H->scat_pos[i].w);
}
// Print the full attribute set of one scatterer on a single line:
// uid, position p(), velocity v(), orientation o(), signal s(),
// cross-section x() and auxiliary a().
static void RS_show_att_i(RSHandle *H, const size_t i) {
    printf(" [%7d %7d %5d %4d] p( %9.2f, %9.2f, %9.2f, %4.1f ) v( %7.2f %7.2f %7.2f ) o( %7.4f %7.4f %7.4f %7.4f) s( %10.3e, %10.3e, %10.3e, %10.3e ) x( %10.3e %10.3e %10.3e %10.3e ) a( %10.3e %10.3e %10.3e %10.3e )\n",
           H->scat_uid[i].x, H->scat_uid[i].y, H->scat_uid[i].z, H->scat_uid[i].w,
           H->scat_pos[i].x, H->scat_pos[i].y, H->scat_pos[i].z, 2000.0f * H->scat_pos[i].w,
           H->scat_vel[i].x, H->scat_vel[i].y, H->scat_vel[i].z,
           H->scat_ori[i].x, H->scat_ori[i].y, H->scat_ori[i].z, H->scat_ori[i].w,
           H->scat_sig[i].x, H->scat_sig[i].y, H->scat_sig[i].z, H->scat_sig[i].w,
           H->scat_rcs[i].x, H->scat_rcs[i].y, H->scat_rcs[i].z, H->scat_rcs[i].w,
           H->scat_aux[i].x, H->scat_aux[i].y, H->scat_aux[i].z, H->scat_aux[i].w);
}
//
// Print an evenly spaced subset of the meteorological scatterers (and of
// debris type 1, if any) showing POS, VEL & ORI.
// Fix: counts[] / RS_SHOW_DIV could truncate to zero when a population has
// fewer than RS_SHOW_DIV members, turning i += 0 into an infinite loop; the
// stride is now clamped to at least 1.
//
void RS_show_scat_pos(RSHandle *H) {
    size_t i, w, step;
    printf("A subset of meteorological scatterer POS, VEL & ORI:\n");
    for (w = 0; w < H->num_workers; w++) {
        step = H->workers[w].counts[0] / RS_SHOW_DIV;
        if (step == 0) {
            step = 1;
        }
        for (i = H->workers[w].origins[0];
             i < H->workers[w].origins[0] + H->workers[w].counts[0];
             i += step) {
            RS_show_scat_i(H, H->offset[w] + i);
        }
    }
    if (H->counts[1] == 0) {
        return;
    }
    printf("A subset of debris[1] POS, VEL & ORI:\n");
    for (w = 0; w < H->num_workers; w++) {
        if (H->workers[w].counts[1]) {
            step = H->workers[w].counts[1] / RS_SHOW_DIV;
            if (step == 0) {
                step = 1;
            }
            for (i = H->workers[w].origins[1];
                 i < H->workers[w].origins[1] + H->workers[w].counts[1];
                 i += step) {
                RS_show_scat_i(H, H->offset[w] + i);
            }
        }
    }
}
//
// Print an evenly spaced subset of the meteorological scatterers (and of
// debris type 1, if any) showing SIG, RCS & AUX.
// Fix: clamp the stride to at least 1 so small populations (< RS_SHOW_DIV)
// cannot produce an i += 0 infinite loop.
//
void RS_show_scat_sig(RSHandle *H) {
    size_t i, w, step;
    printf("A subset of meteorological scatterer SIG, RCS & AUX:\n");
    for (w = 0; w < H->num_workers; w++) {
        step = H->workers[w].counts[0] / RS_SHOW_DIV;
        if (step == 0) {
            step = 1;
        }
        for (i = 0; i < H->workers[w].counts[0]; i += step) {
            RS_show_rcs_i(H, H->offset[w] + H->workers[w].origins[0] + i);
        }
    }
    if (H->counts[1] == 0) {
        return;
    }
    printf("A subset of debris[1] scatterer SIG, RCS, & AUX:\n");
    for (w = 0; w < H->num_workers; w++) {
        step = H->workers[w].counts[1] / RS_SHOW_DIV;
        if (step == 0) {
            step = 1;
        }
        for (i = 0; i < H->workers[w].counts[1]; i += step) {
            RS_show_rcs_i(H, H->offset[w] + H->workers[w].origins[1] + i);
        }
    }
}
//
// Print an evenly spaced subset of the meteorological scatterers (and of
// debris type 1, if any) showing the full attribute set.
// Fix: clamp the stride to at least 1 so small populations (< RS_SHOW_DIV)
// cannot produce an i += 0 infinite loop.
//
void RS_show_scat_att(RSHandle *H) {
    size_t i, w, step;
    printf("A subset of meteorological scatterer POS, VEL, ORI, SIG, RCS, and AUX:\n");
    for (w = 0; w < H->num_workers; w++) {
        step = H->workers[w].counts[0] / RS_SHOW_DIV;
        if (step == 0) {
            step = 1;
        }
        for (i = 0; i < H->workers[w].counts[0]; i += step) {
            RS_show_att_i(H, H->offset[w] + H->workers[w].origins[0] + i);
        }
    }
    if (H->counts[1] == 0) {
        return;
    }
    printf("A subset of debris[1] scatterer POS, VEL, ORI, SIG, RCS, and AUX:\n");
    for (w = 0; w < H->num_workers; w++) {
        step = H->workers[w].counts[1] / RS_SHOW_DIV;
        if (step == 0) {
            step = 1;
        }
        for (i = 0; i < H->workers[w].counts[1]; i += step) {
            RS_show_att_i(H, H->offset[w] + H->workers[w].origins[1] + i);
        }
    }
}
//
// Print a compact one-line view of the current pulse: simulation time, the
// first few range gates, an ellipsis and the last gate when there are more.
//
void RS_show_pulse(RSHandle *H) {
    unsigned int k;
    const unsigned int head = MIN(4, H->params.range_count);
    printf(" %7.5fs - [", H->sim_tic);
    for (k = 0; k < head; k++) {
        if (k > 0) {
            printf(",");
        }
        printf(" %9.2f", H->pulse[k].s0);
    }
    if (head < H->params.range_count) {
        printf(", . . . , %9.2f", H->pulse[H->params.range_count - 1].s0);
    }
    printf(" ] (%d)\n", H->params.range_count);
}
#pragma mark -
//
// Derive a scan box (origin + size in range / azimuth / elevation) that makes
// best use of the LES wind domain for the configured scan pattern. Falls back
// to a default PPI pattern and the suction-vortices LES configuration when
// either is missing.
//
RSBox RS_suggest_scan_domain(RSHandle *H) {
    RSBox box;
    memset(&box, 0, sizeof(RSBox));
    if (H->P == NULL) {
        rsprint("WARNING. No scanning pattern for RS_set_scan_pattern().\n");
        rsprint("WARNING. Using a default PPI scan pattern.\n");
        H->P = POS_init();
    }
    if (H->L == NULL) {
        RS_set_vel_data_to_config(H, LESConfigSuctionVortices);
    }
    if (H->verb > 1) {
        rsprint("RS_suggest_scan_domain()");
    }
    // Extremas of the domain
    float w = 0.0f, h = 0.0f;
    float na = 0.0f, ne = 0.0f, nr = 0.0f;
    POSPattern *scan = H->P;
    // Extremas of the domain: half-width w and height h of the LES volume.
    // For stretched grids these are geometric series sums of the cell sizes.
    if (H->vel_desc.is_stretched) {
        w = H->vel_desc.ax * (1.0f - powf(H->vel_desc.rx, 0.5f * (float)(H->vel_desc.nx - 3))) / (1.0f - H->vel_desc.rx);
        h = H->vel_desc.az * (1.0f - powf(H->vel_desc.rz, (float)(H->vel_desc.nz - 1))) / (1.0f - H->vel_desc.rz);
    } else {
        w = 0.5f * H->vel_desc.nx * H->vel_desc.rx;
        h = H->vel_desc.nz * H->vel_desc.rz;
    }
    //if (POS_is_dbs(scan)) {
    if (H->sim_concept & RSSimulationConceptVerticallyPointingRadar) {
        //printf("range_delta = %.2f\n", H->params.range_delta);
        // Go through the DBS pattern to find the elevation extremes; the
        // range span is fixed to [rmin, ~2 km] rounded to the gate spacing.
        float emin = 90.0f;
        float emax = 0.0f;
        float rmin = 100.0f;
        float rmax = ceilf((2000.0f - rmin) / (2.0f * H->params.range_delta)) * 2.0f * H->params.range_delta;
        int j = 0;
        while (j < scan->count) {
            emin = MIN(emin, scan->positions[j].el);
            emax = MAX(emax, scan->positions[j].el);
            j++;
        }
        //rsprint("scan_count = %d emin = %.2f emax = %.2f rmin = %.2f rmax = %.2f\n", scan->count, emin, emax, rmin, rmax);
        //w = rmax * cos(emin / 180.0f * M_PI);
        //h = rmax * sin(emax / 180.0f * M_PI);
        // Warn when the scan geometry pokes outside the LES volume.
        if (rmax * cos(emin / 180.0f * M_PI) > w) {
            rsprint("WARNING. Elevation %.2f at %.2f m exceeds the LES domain width = %.2f.\n", emin, rmax, w);
        }
        if (rmax * sin(emax / 180.0f * M_PI) > h) {
            rsprint("WARNING. Elevation %.2f at %.2f m exceeds the LES domain height = %.2f.\n", emax, rmax, h);
        }
        na = 360.0f;
        ne = ceilf((emax - emin) / H->params.antenna_bw_deg);
        nr = ceilf((rmax - rmin) / H->params.dr * 0.5f) * 2.0f;
        box.origin.a = 0.0f;
        box.size.a = 360.0f;
        box.origin.e = emin;
        box.size.e = emax - emin;
        box.origin.r = rmax - (nr + RS_DOMAIN_PAD - 1.0f) * H->params.dr;
        box.size.r = floorf(nr - RS_DOMAIN_PAD - 1.0f) * H->params.dr;
    } else {
        // TO DO:
        // Derive azimuth swath based on POSPattern
        //
        // ...
        const int nbeams = 16;
        // Maximum number of beams plus the padding on one side in azimuth
        na = 0.5f * (float)nbeams + RS_DOMAIN_PAD + 0.5f;
        // Maximum number of beams in elevation
        ne = 18.0f;
        // Maximum y of the emulation box: The range when the width is fully utilized; This is also rmax
        float rmax = w / sinf(na * H->params.antenna_bw_rad);
        // Minimum y of the emulation box: The range when the height is fully utilized
        float rmin = (rmax - 2.0f * w) / cosf(na * H->params.antenna_bw_rad) / cosf(ne * H->params.antenna_bw_rad);
        // If we cannot respect the padding on both sides
        // Maximum number of range cells minus the padding on both sides minus one radar cell
        nr = (rmax - rmin) / H->params.dr - 2.0f * RS_DOMAIN_PAD - 1.0f;
        nr = ceilf(nr * 0.5f) * 2.0f;
        if (rmax - rmin < 8.0f * H->params.dr) {
            rsprint("ERROR: Range resolution of the radar is too coarse!");
            rsprint("rmax = %.3f rmin = %.3f dr = %.2f", rmax, rmin, H->params.dr);
        }
        box.origin.a = ceilf(-0.5f * (float)nbeams) * H->params.antenna_bw_rad * 180.0f / M_PI;
        box.size.a = nbeams * H->params.antenna_bw_deg;
        box.origin.r = rmax - (nr + 2.0f * RS_DOMAIN_PAD - 1.0f) * H->params.dr;
        box.size.r = floorf(nr - 2.0f * RS_DOMAIN_PAD - 1.0f) * H->params.dr;
        box.origin.e = 0.0f;
        box.size.e = ne * H->params.antenna_bw_deg;
    }
    if (H->verb) {
        printf("%s : RS : Suggest scan box < [ 2w = %.1f m, h = %.1f m ] nr = %.1f na = %.1f ne = %.1f\n"
               "%s : RS : Best fit with R:[ %5.2f ~ %5.2f ] km E:[ %5.2f ~ %5.2f ] deg A:[ %6.2f ~ %6.2f ] deg\n",
               now(), 2.0f * w, h, nr, na, ne,
               now(), 1.0e-3f * box.origin.r, 1.0e-3f * (box.origin.r + box.size.r), box.origin.e, box.origin.e + box.size.e, box.origin.a, box.origin.a + box.size.a);
    }
    return box;
}
//
// Pre-compute the complex scattering amplitudes of oblate-spheroid rain drops
// for D = 0.5 mm ... 10.0 mm (0.1-mm steps, 96 entries) and install them as
// the ellipsoid RCS lookup table, indexed by drop radius in m.
// Fixes: (1) snprintf() into H->summary was given the FULL buffer size while
// writing at an offset, allowing writes past the end of summary[] -- the size
// is now the remaining space; (2) malloc() results are checked; (3) the DSD
// table index k is bounds-checked before use.
//
void RS_compute_rcs_ellipsoids(RSHandle *H) {
    int i;
    const cl_double k_0 = H->sim_desc.s[RSSimulationDescriptionWaveNumber] * 0.5f;
    const cl_double epsilon_0 = 8.85418782e-12f;
    // (epsilon_r - 1) of water, duplicated for the two polarization channels
    const cl_double4 epsilon_r_minus_one = (cl_double4){{78.669, 18.2257, 78.669, 18.2257}};
    if (H->verb > 1) {
        rsprint("RS_compute_rcs_ellipsoids()\n");
    }
    //
    // Sc = k_0 ^ 2 / (4 * pi * epsilon_0)
    // Coefficient 1.0e-9 for scaling the volume to unit of m^3
    //
    if (H->verb) {
        rsprint("Drop concentration scaling = %s (k_0 = %.4f)\n", commafloat(H->sim_desc.s[RSSimulationDescriptionDropConcentrationScale]), k_0);
    }
    const cl_double sc = k_0 * k_0 / (4.0f * M_PI * epsilon_0) * 1.0e-9f * H->sim_desc.s[RSSimulationDescriptionDropConcentrationScale];
    // Make table with D = 0.5mm, 0.6mm, ... 10.0mm (96 entries)
    const size_t n = 96;
    cl_float4 *table = (cl_float4 *)malloc(n * sizeof(cl_float4));
    if (table == NULL) {
        rsprint("ERROR: Unable to allocate the ellipsoid RCS table.\n");
        return;
    }
    if (H->sim_concept & RSSimulationConceptTransparentBackground) {
        // Transparent background: zero amplitudes everywhere
        memset(table, 0, n * sizeof(cl_float4));
    } else {
        for (i = 0; i < n; i++) {
            // Diameter (mm) to be computed
            cl_double d = 0.5 + (cl_double)i * 0.1;
            cl_double d2 = d * d;
            cl_double d3 = d2 * d;
            cl_double d4 = d3 * d;
            // Equilibrium axis ratio b/a as a polynomial fit in D
            cl_double rba = 1.0048 + (0.0057e-1 * d) - (2.628e-2 * d2) + (3.682e-3 * d3) - (1.677e-4 * d4);
            cl_double rab = 1.0f / rba;
            cl_double fsq = rab * rab - 1.0;
            cl_double f = sqrt(fsq);
            // Depolarization (shape) factors along z and x
            cl_double lz = (1.0 + fsq) / fsq * (1.0 - atan(f) / f);
            cl_double lx = (1.0 - lz) * 0.5;
            cl_double vol = M_PI * d3 / 6.0;
            cl_double4 numer = double_complex_multiply((cl_double4){{vol * epsilon_0, 0.0, vol * epsilon_0, 0.0}}, epsilon_r_minus_one);
            cl_double4 denom = {{
                lx * epsilon_r_minus_one.s0 + 1.0,
                lx * epsilon_r_minus_one.s1,
                lz * epsilon_r_minus_one.s2 + 1.0,
                lz * epsilon_r_minus_one.s3
            }};
            cl_double4 alxz = double_complex_divide(numer, denom);
            // Reduced precision at the very last step
            table[i].s0 = (cl_float)(sc * alxz.s0);
            table[i].s1 = (cl_float)(sc * alxz.s1);
            table[i].s2 = (cl_float)(sc * alxz.s2);
            table[i].s3 = (cl_float)(sc * alxz.s3);
#ifdef DEBUG_HEAVY
            rsprint("D = %.2fmm rba %.4f rab %.4f lz %.4f lx %.4f numer = %.3e %.3e %.3e %.3e denom = %.3f %.3f %.3f %.3f alxz = %.3e %.3e %.3e %.3e lx/lz = %.3e %.3e %.3e %.3e",
                    d, rba, rab, lz, lx, numer.s0, numer.s1, numer.s2, numer.s3, denom.s0, denom.s1, denom.s2, denom.s3, alxz.s0, alxz.s1, alxz.s2, alxz.s3, table[i].s0, table[i].s1, table[i].s2, table[i].s3);
#endif
        }
        // Each size has same probably of occurence, the return power is scaled by the ratio of the
        if (H->sim_concept & RSSimulationConceptUniformDSDScaledRCS) {
            int k;
            float s;
            size_t used;
            const float p = 1.0f / (float)H->dsd_count;
            cl_float4 *table_copy = (cl_float4 *)malloc(n * sizeof(cl_float4));
            if (table_copy == NULL) {
                rsprint("ERROR: Unable to allocate a copy of the ellipsoid RCS table.\n");
                free(table);
                return;
            }
            memcpy(table_copy, table, n * sizeof(cl_float4));
            memset(table, 0, n * sizeof(cl_float4));
            if (H->dsd_count) {
                // Limit snprintf() to the REMAINING space in summary[], not
                // the full buffer size, to prevent an overflow on appends.
                used = strlen(H->summary);
                snprintf(H->summary + used, sizeof(H->summary) - used, "Drop RCS Scaling:\n");
                for (i = 0; i < H->dsd_count; i++) {
                    // Radius (m) -> table index at 0.05-mm radius resolution
                    k = (int)(H->dsd_r[i] * 20000.0f) - 5;
                    if (k < 0 || k >= (int)n) {
                        // Drops outside 0.5 - 10.0 mm would index out of bounds
                        rsprint("WARNING. DSD bin %.2f mm is outside of the RCS table. Skipping.\n", 2000.0f * H->dsd_r[i]);
                        continue;
                    }
                    s = sqrtf(H->dsd_pdf[i] / p);
                    if (H->verb) {
                        printf(RS_INDENT "o %.2f mm scale by %.4f / %.4f = %.4f = %.2f dB k = %d\n", 2000.0f * H->dsd_r[i], H->dsd_pdf[i], p, s, 20.0f * log10f(s), k);
                    }
                    used = strlen(H->summary);
                    snprintf(H->summary + used, sizeof(H->summary) - used, " o %.2f mm %.5f -> %.2f dB\n", 2000.0f * H->dsd_r[i], H->dsd_pdf[i], 20.0f * log10f(s));
                    table[k].s0 = table_copy[k].s0 * s;
                    table[k].s1 = table_copy[k].s1 * s;
                    table[k].s2 = table_copy[k].s2 * s;
                    table[k].s3 = table_copy[k].s3 * s;
                }
            }
#ifdef DEBUG_HEAVY
            for (i = 0; i < n; i++) {
                cl_double d = 0.5 + (cl_double)i * 0.1;
                rsprint("D = %.2fmm %.3e %.3e %.3e %.3e --> %.3e %.3e %.3e %.3e", d, table_copy[i].s0, table_copy[i].s1, table_copy[i].s2, table_copy[i].s3, table[i].s0, table[i].s1, table[i].s2, table[i].s3);
            }
#endif
            free(table_copy);
        }
    }
    // Set table lookup in radius in mm
    RS_set_rcs_ellipsoid_table(H, table, 0.25e-3f, 0.05e-3f, n);
    free(table);
}
/**
 * Returns the simulation summary text, trimming a single trailing
 * newline in place (if present) so callers get a clean string.
 *
 * @param H  simulation handle whose H->summary buffer holds the text
 * @return   pointer to H->summary (owned by H; caller must not free)
 */
char *RS_simulation_description(RSHandle *H) {
    size_t len = strlen(H->summary);
    // Guard against an empty summary: the previous code computed
    // summary + strlen(summary) - 1 unconditionally, which reads (and
    // potentially writes) one byte *before* the buffer when len == 0.
    if (len > 0 && H->summary[len - 1] == '\n') {
        H->summary[len - 1] = '\0';
    }
    return H->summary;
}
|
<filename>ios/versioned-react-native/ABI32_0_0/Expo/Core/Api/Components/GestureHandler/ABI32_0_0RNGestureHandlerRegistry.h
//
// ABI32_0_0RNGestureHandlerRegistry.h
// ABI32_0_0RNGestureHandler
//
// Created by <NAME> on 12/10/2017.
// Copyright © 2017 Software Mansion. All rights reserved.
//
#import "ABI32_0_0RNGestureHandler.h"
// Registry of gesture handler instances, keyed by their numeric tag.
// NOTE(review): thread-safety and the exact attach/detach lifecycle are
// defined in the corresponding .m file — confirm there before relying on them.
@interface ABI32_0_0RNGestureHandlerRegistry : NSObject
// Returns the handler stored under `handlerTag`, or nil when none is registered.
- (nullable ABI32_0_0RNGestureHandler *)handlerWithTag:(nonnull NSNumber *)handlerTag;
// Adds `gestureHandler` to the registry (presumably keyed by its tag — verify in the .m).
- (void)registerGestureHandler:(nonnull ABI32_0_0RNGestureHandler *)gestureHandler;
// Attaches the handler registered under `handlerTag` to `view`.
- (void)attachHandlerWithTag:(nonnull NSNumber *)handlerTag toView:(nonnull UIView *)view;
// Removes the handler registered under `handlerTag` from the registry.
- (void)dropHandlerWithTag:(nonnull NSNumber *)handlerTag;
@end
|
<reponame>szacho/augmix-tf<filename>augmix/transformations.py
import math
import tensorflow as tf, tensorflow.keras.backend as K
from .helpers import *
def rotate(image, level):
    """Rotate `image` by a random angle of up to 30 degrees.

    The magnitude grows with `level`; the sign (rotation direction) is
    chosen uniformly at random.
    """
    magnitude = float_parameter(sample_level(level), 30)
    coin = tf.random.uniform(shape=[], dtype=tf.float32)
    signed_degrees = tf.cond(coin > 0.5, lambda: magnitude, lambda: -magnitude)
    # Convert degrees to radians.
    radians = tf.cast(tf.constant(math.pi) * signed_degrees / 180, tf.float32)
    cos_a = tf.math.cos(radians)
    sin_a = tf.math.sin(radians)
    unit = tf.constant([1], dtype='float32')
    nil = tf.constant([0], dtype='float32')
    # 2-D rotation matrix in homogeneous coordinates.
    matrix = tf.reshape(
        tf.concat([cos_a, sin_a, nil, -sin_a, cos_a, nil, nil, nil, unit], axis=0),
        [3, 3])
    return affine_transform(image, matrix)
def translate_x(image, level):
    """Translate `image` along one axis by a random, level-scaled offset.

    The maximum offset is one third of the image's first dimension; the
    sign of the shift is chosen uniformly at random.
    """
    shift = int_parameter(sample_level(level), image.shape[0] / 3)
    coin = tf.random.uniform(shape=[], dtype=tf.float32)
    shift = tf.cond(coin > 0.5, lambda: shift, lambda: -shift)
    unit = tf.constant([1], dtype='float32')
    nil = tf.constant([0], dtype='float32')
    shift = tf.cast(shift, tf.float32)
    # Translation matrix in homogeneous coordinates (offset in row 2).
    matrix = tf.reshape(
        tf.concat([unit, nil, nil, nil, unit, shift, nil, nil, unit], axis=0),
        [3, 3])
    return affine_transform(image, matrix)
def translate_y(image, level):
    """Translate `image` along the other axis by a random, level-scaled offset.

    The maximum offset is one third of the image's first dimension; the
    sign of the shift is chosen uniformly at random.
    """
    shift = int_parameter(sample_level(level), image.shape[0] / 3)
    coin = tf.random.uniform(shape=[], dtype=tf.float32)
    shift = tf.cond(coin > 0.5, lambda: shift, lambda: -shift)
    unit = tf.constant([1], dtype='float32')
    nil = tf.constant([0], dtype='float32')
    shift = tf.cast(shift, tf.float32)
    # Translation matrix in homogeneous coordinates (offset in row 1).
    matrix = tf.reshape(
        tf.concat([unit, nil, shift, nil, unit, nil, nil, nil, unit], axis=0),
        [3, 3])
    return affine_transform(image, matrix)
def shear_x(image, level):
    """Shear `image` by a random, level-scaled amount (max 0.3).

    The sign of the shear is chosen uniformly at random; the shear term
    is placed off-diagonal in the affine matrix.
    """
    amount = float_parameter(sample_level(level), 0.3)
    coin = tf.random.uniform(shape=[], dtype=tf.float32)
    amount = tf.cond(coin > 0.5, lambda: amount, lambda: -amount)
    unit = tf.constant([1], dtype='float32')
    nil = tf.constant([0], dtype='float32')
    shear = tf.math.sin(amount)
    matrix = tf.reshape(
        tf.concat([unit, shear, nil, nil, unit, nil, nil, nil, unit], axis=0),
        [3, 3])
    return affine_transform(image, matrix)
def shear_y(image, level):
    """Shear `image` along the other axis by a random, level-scaled amount.

    Bug fix: the previous matrix placed cos(lvl) on the *diagonal*
    ([[1,0,0],[0,cos,0],[0,0,1]]), which scales the image rather than
    shearing it.  A shear needs an off-diagonal term; this is now the
    transpose of shear_x's matrix, using sin(lvl) to mirror shear_x.
    """
    lvl = float_parameter(sample_level(level), 0.3)
    rand_var = tf.random.uniform(shape=[], dtype=tf.float32)
    # Randomize the shear direction.
    lvl = tf.cond(rand_var > 0.5, lambda: lvl, lambda: -lvl)
    one = tf.constant([1], dtype='float32')
    zero = tf.constant([0], dtype='float32')
    s2 = tf.math.sin(lvl)
    # Off-diagonal entry shears: transpose of the shear_x matrix.
    shear_y_matrix = tf.reshape(
        tf.concat([one, zero, zero, s2, one, zero, zero, zero, one], axis=0),
        [3, 3])
    transformed = affine_transform(image, shear_y_matrix)
    return transformed
def solarize(image, level):
    """Invert every pixel at or above a level-dependent threshold.

    Pixels below the threshold pass through unchanged; the rest are
    replaced by their complement (image values are in [0, 1] here).
    """
    cutoff = float_parameter(sample_level(level), 1)
    return tf.where(image < cutoff, image, 1 - image)
def solarize_add(image, level):
    """Shift pixels below a threshold by a random signed offset, then clip.

    The offset magnitude is level-scaled (up to 0.5 for images in [0, 1])
    and its sign is chosen uniformly at random.  Pixels at or above the
    threshold are left untouched; shifted values are clipped to [0, 1].
    """
    cutoff = float_parameter(sample_level(level), 1)
    offset = float_parameter(sample_level(level), 0.5)
    coin = tf.random.uniform(shape=[], dtype=tf.float32)
    offset = tf.cond(coin > 0.5, lambda: offset, lambda: -offset)
    shifted = tf.cast(image, tf.float32) + offset
    shifted = tf.cast(tf.clip_by_value(shifted, 0, 1), tf.float32)
    return tf.where(image < cutoff, shifted, image)
def posterize(image, level):
    """Reduce the number of bits kept per colour channel.

    The [0, 1] float image is mapped to uint8, its `8 - lvl`
    least-significant bits are zeroed with a right/left shift pair, and
    the result is scaled back to [0, 1] floats.
    """
    bits = int_parameter(sample_level(level), 8)
    drop = 8 - bits
    drop = tf.cast(drop, tf.uint8)
    pixels = tf.cast(tf.math.scalar_mul(255, image), tf.uint8)
    pixels = tf.bitwise.left_shift(tf.bitwise.right_shift(pixels, drop), drop)
    return tf.cast(tf.clip_by_value(tf.math.divide(pixels, 255), 0, 1), tf.float32)
def autocontrast(image, _):
    """Maximize contrast independently for each of the three channels.

    Each channel is remapped so its minimum becomes 0 and its maximum
    becomes 255 (in uint8 space), then the image is rescaled back to
    [0, 1] floats.  The second argument (the augmentation level) is
    ignored — autocontrast has no strength parameter.
    """
    image = tf.cast(tf.math.scalar_mul(255, image), tf.uint8)
    def scale_channel(image):
        # A possibly cheaper version can be done using cumsum/unique_with_counts
        # over the histogram values, rather than iterating over the entire image.
        # to compute mins and maxes.
        lo = tf.cast(tf.reduce_min(image), tf.float32)
        hi = tf.cast(tf.reduce_max(image), tf.float32)
        # Scale the image, making the lowest value 0 and the highest value 255.
        def scale_values(im):
            scale = 255.0 / (hi - lo)
            offset = -lo * scale
            im = tf.cast(im, tf.float32) * scale + offset
            im = tf.clip_by_value(im, 0.0, 255.0)
            return tf.cast(im, tf.uint8)
        # Constant channels (hi == lo) are returned unchanged; this also
        # avoids a divide-by-zero inside scale_values.
        result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
        return result
    # Assumes RGB for now. Scales each channel independently
    # and then stacks the result.
    s1 = scale_channel(image[:, :, 0])
    s2 = scale_channel(image[:, :, 1])
    s3 = scale_channel(image[:, :, 2])
    image = tf.stack([s1, s2, s3], 2)
    return tf.cast(tf.clip_by_value(tf.math.divide(image, 255), 0, 1), tf.float32)
def equalize(image, _):
    """Histogram-equalize each of the three channels independently.

    The [0, 1] float image is mapped to uint8, each channel's histogram
    is flattened via a cumulative-sum lookup table (mirroring PIL's
    ImageOps.equalize), and the result is rescaled back to [0, 1]
    floats.  The second argument (the augmentation level) is ignored.
    """
    image = tf.cast(tf.math.scalar_mul(255, image), tf.uint8)
    def scale_channel(im, c):
        # Equalize channel index `c` of image `im`.
        im = tf.cast(im[:, :, c], tf.int32)
        # Compute the histogram of the image channel.
        histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
        # For the purposes of computing the step, filter out the nonzeros.
        nonzero = tf.where(tf.not_equal(histo, 0))
        nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
        # Step between output levels; the last nonzero bin is excluded.
        step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
        def build_lut(histo, step):
            # Compute the cumulative sum, shifting by step // 2
            # and then normalization by step.
            lut = (tf.cumsum(histo) + (step // 2)) // step
            # Shift lut, prepending with 0.
            lut = tf.concat([[0], lut[:-1]], 0)
            # Clip the counts to be in range. This is done
            # in the C code for image.point.
            return tf.clip_by_value(lut, 0, 255)
        # If step is zero, return the original image. Otherwise, build
        # lut from the full histogram and step and then index from it.
        result = tf.cond(tf.equal(step, 0),
                         lambda: im,
                         lambda: tf.gather(build_lut(histo, step), im))
        return tf.cast(result, tf.uint8)
    # Assumes RGB for now. Scales each channel independently
    # and then stacks the result.
    s1 = scale_channel(image, 0)
    s2 = scale_channel(image, 1)
    s3 = scale_channel(image, 2)
    image = tf.stack([s1, s2, s3], 2)
    return tf.cast(tf.clip_by_value(tf.math.divide(image, 255), 0, 1), tf.float32)
def color(image, level):
    """Blend between a grayscale version of `image` and the original.

    The blend factor lies in [0.1, 1.9]: values below 1 desaturate
    toward grayscale, values above 1 oversaturate.
    """
    factor = float_parameter(sample_level(level), 1.8) + 0.1
    pixels = tf.cast(tf.math.scalar_mul(255, image), tf.uint8)
    gray = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(pixels))
    mixed = blend(gray, pixels, factor)
    return tf.cast(tf.clip_by_value(tf.math.divide(mixed, 255), 0, 1), tf.float32)
def brightness(image, level):
    """Adjust brightness by a random signed, level-scaled delta.

    The delta magnitude lies in [0.1, 0.6]; its sign is chosen
    uniformly at random.
    """
    amount = float_parameter(sample_level(level), 0.5) + 0.1
    coin = tf.random.uniform(shape=[], dtype=tf.float32)
    amount = tf.cond(coin > 0.5, lambda: amount, lambda: -amount)
    return tf.image.adjust_brightness(image, delta=amount)
def contrast(image, level):
    """Adjust contrast by a level-scaled factor.

    The factor lies in [0.1, 1.9]; with probability 0.5 it is mirrored
    to 1.9 - factor, so the range is covered from both ends.
    """
    amount = float_parameter(sample_level(level), 1.8) + 0.1
    amount = tf.reshape(amount, [])
    coin = tf.random.uniform(shape=[], dtype=tf.float32)
    amount = tf.cond(coin > 0.5, lambda: amount, lambda: 1.9 - amount)
    return tf.image.adjust_contrast(image, amount)
|
Risk assessment and risk management of contaminants in the feed to food chain In feed production processes, factories usually produce different mixtures within the same production line. Consequently, remainders of the first-produced feed can stay in the system and be mixed with the following feed charge. This type of transfer (carry-over) is unavoidable in the production systems currently used, and thus, non-medicated feed can be contaminated with veterinary drugs present in a previously manufactured charge of medicated feed. The carry-over of veterinary medicinal products is associated with the risk of residues remaining in the tissues of treated animals at the time of slaughter and poses a health hazard to consumers. Producing safe feed and food products is, first and foremost, a question of good management practices at each stage of the feed and food chain, from primary production to final processing. Primary responsibility for feed safety rests with the feed business operator, who must ensure that all stages of production, processing and distribution under their control are carried out in accordance with relevant legislation, good manufacturing practice and principles contained in the HACCP system. Concrete steps for feed manufacturers to prevent drug carry-over are using one or more approved cleanout procedures of manufacturing equipment, such as cleaning, flushing or sequencing. Introduction The 'farm-to-fork' approach promoted by the European Union requires the assessment and control of major components of the food production chain, with emphasis on primary production. Feeds must satisfy the nutritional requirements of the relevant animal species, and they are expected to support safe and cost-effective production of foods of animal origin, as well as to ensure the welfare of farm animals. 
Adequate animal feedingstuffs, which are the main input into livestock production, should be used to ensure the final product reaching the market has the required quality and poses no risk to the consumer. The competitiveness of the agricultural sector because of globalization has led to the need for intensified productivity of animal production systems. For this reason, the stocking rate in poultry and pig production units was increased, causing a greater frequency of disease due to higher infection pressure. Although in recent years much emphasis has been placed on disease prevention through improved management and environmental conditions, intensive animal production systems still depend on drugs (antimicrobials), as shown by their continuously growing market. A common means to deliver such drugs is by including them in feed, as the large-scale use of feedlots makes this easy, and as it avoids handling animals for individual drug administration. However, Dunlop et al. and Varga Legal aspects of veterinary drug carry-over in feeds After production of medicated feed, it is very difficult to completely avoid carry-over of drugs into feed that should be free from such substances (zero tolerance). From the standpoint of legislation, the veterinary drug residues in feed are not allowed and their presence, determined during official monitoring, excludes the placing of such feed on the market. Currently, this principle applies to eight countries within the EU. However, there are alternative ways to avoid zero tolerance: direct use of orally-administered drug in powder form on the farm, top dressing or administration of drugs via drinking water. Each of these methods has certain application risks. Oral powders are usually not dosed into feed by specific, calibrated, devices, but are dosed manually by farmers, with evident weaknesses in such a process. 
The use of top dressing methods risk that strong, dominant, animals achieve over-intake, while weaker animals with less access to feed achieve lower intake than expected. In such a scenario, the target microbial pathogen in an animal is exposed to subtherapeutic dosage of the antimicrobial, so some significant number of the target pathogens survives treatment. This induces selection of drug-resistant microbial pathogens. The imprecision of delivering drugs via the drinking water system is reflected in the amount of water spilled and the variation in the amount of water the animals actually drink. Practical drawbacks are the creation of solid complexes in the pipes and obstruction of drinking nipples, which affect the precision of drug dosing. Most countries in the European Union do not have clearly established national limits for unwanted carry-over of veterinary drugs in non-target feed, while three countries have established limit values, primarily based more on the ALARA values (As Low As Reasonably Achievable), rather than risk assessment for public health: 1. In Belgium, ALARA values are applied, but only under the conditions that the level of crosscontamination cannot cause: a) animal health disorder; b) exceeded MRL in products of animal origin, or; c) increased antimicrobial resistance. In particular, the upper limit values for contamination should never be above 2.5% of the minimum prescribed dose for antibiotics or 3% of the maximum prescribed dose for antihelmintics. 2. In France, validation of the production process is applied: the maximum permitted contamination by VMPs is 5% in the first and 1% in the second charge after production of the last medicated feed batch. 3. The legal status in the Netherlands is still in the process of adoption, but the maximum permitted contamination in non-target feed will be up to 2.5% of the lowest dose of VMP permitted in the targeted feed. Stolker et al. 
have shown that the percentage of veterinary medicine carry-over (in the production of medicated feed for pigs in the Netherlands) is not correlated with the percentage determined by standard Good Manufacturing Practice (GMP+) procedures. More precisely, it is not possible to predict the concentration of antibiotic in a flushing charge based on determined percentage of carry-over in the feed production plant. The inability to avoid carry-over, non-homogeneity in the production of medicated feed, and the previously stated difficulties in predicting the level of carry-over, along with increasing concern about the growing problem of microbial resistance, motivated NEVEDI, an association of Dutch feed manufacturers, to announce that they would voluntarily stop the production of medicated feed in 2011, which was the first case of this kind in Europe. In Serbia, the Rulebook on the Quality of Feed states (in Article 88) that feed mixtures must not contain antibiotics or sulphonamides, i.e. zero tolerance is applied, so these substances must not be present in feedingstuffs. Legal aspects of carry-over of coccidiostatics and histomonostatics In addition to carry-over of veterinary medicines, special attention is required to understand regulatory aspects concerning the presence of coccidiostatics and histomonostatics in non-target feed. Coccidiostats and histomonostats are substances intended to inhibition the growth or destruction of protozoa, and these substances can, inter alia, be approved for use as feed additives in accordance with European Regulatory Council Regulation (EC) No 1831/2003. It can be a confusing fact that some coccidiostats are registered not as feed additives but as drugs, i.e. VMPs. For active substances in the VMP that are the same as a substance in a feed additive, the applicable maximum level of crosscontamination in non-target feed is the maximum content of feed additive in complete feed established in the relevant Union act. 
A list of the named coccidiostats (registered as VMPs) is given in the Annex of Allowed Substances in Commission Regulation No. 37/2010 and consists of Amprolium, Decoquinate, Diclazuril, Halofuginone, Imidocarb, Lasalocide and Toltrazuril. As such, they can be used in the production of medicated feed, based on veterinary prescription. They are most commonly used in the breakthrough of coccidiosis, where no coccidiostatics are added in feed, in cases of development of resistance, or when the vaccines are insufficiently efficacious. Carry-over of coccidiostatics and histomonostatics can lead to contamination of feed where the use of coccidiostatics or histomonostatics is not authorized, such as feed for animal species or categories not specified in the authorization of the additive. This inevitable cross-contamination can occur at every stage of production and processing of feed, as well as during storage and transport of feed. Inevitable transfer of active substances contained in approved coccidiostats and histomonostats into non-target feeds results in the presence of undesirable substances in the feed in accordance with Directive 2002/32/EC. Thus, taking into account the application of good manufacturing practice, the maximum level of unavoidable carry-over should be established according to the ALARA principle. In order to allow the feed producer to manage the inevitable transfer, a transfer rate of about 3% (in relation to the maximum allowed content) should be taken into account in terms of feed for less sensitive animal species, while for feed intended for sensitive non-target species and feed with a withdrawal period, i.e. feed used in the preslaughter period, a transfer rate of about 1% can be considered. 
A transfer rate of 1% should also be established for the cross-contamination of other feed for the target species when it has no added coccidiostats and histomonostats, as well as for non-target feed for animals such as dairy cows or layer hens, where clear evidence exists of transfer from feed to food of animal origin. In the European Union, this described problem is regulated by Commission Directive 2009/8/EC of 10 February 2009. In that Directive, the maximum level of imminent carry-over in non-target feed for 11 coccidiostats has been set: Lasalocid sodium, Narasin, Salinomycin sodium, Monensin sodium, Semumramycin sodium, Maduramycin ammonium alpha, Robenidine hydrochloride, Decoquinate, Halofuginone hydrobromide, Nicarbazine and Diclazuril. The levels are expressed in mg/kg (ppm) in feed with a moisture content of 12% (Table 1) and are aimed at avoiding excessive exposure of animals to these compounds, since most of the compounds have a relatively low safety margin and their higher concentration in feed can cause harmful effects even in the target animal species. In Serbia, in accordance with the harmonization of the regulations regarding the animal feed safety sector, in the Rulebook on the Quality of Feed, Article 99 (maximum permissible harmful substances) established identical levels as the EU has for these above-mentioned 11 coccidiostats, while the manner of their use and their maximum residue levels in food are given in Article 89. When interpreting this Rulebook, it is necessary to note that most coccidiostats have been registered as additives only until 2018, so the time limit for placing them on the market has already expired. However, other coccidiostats have been registered as additives for longer (until 2020, 2021, 2022, 2023, or 2025), and so all these compounds must by carefully selected for use in feeds on the basis of their being registered as additives or not. 
Risk assessment of carry-over The primary responsibility for feed safety rests with the feed business operator, who must ensure that all stages of production, processing and distribution under their control are carried out in accordance with relevant legislation and good manufacturing practice. The feed producer (commercial or farm) is obligated to ensure that the exact amount of the desired drug is correctly incorporated and that there is no cross-contamination of any unwanted drug in that feed. In feed production processes, factories produce different mixtures within the same production line. Consequently, remainders of the first-produced feed can stay in the system and be mixed with the following feed charge. This type of transfer is unavoidable in the production systems currently used, and thus, non-medicated feed can be contaminated with veterinary drugs present in a previously manufactured charge of medicated feed. Carry-over is (usually) expressed as the percentage of the nutrient, veterinary medicament and/or contaminant from one feed batch that ends up in the following feed batch (flushing charge). Stolker et al. have documented that sometimes a flushing feed is contaminated not only with the antibiotic used directly before the production of the flushing feed, but also with an antibiotic used several batches earlier in the production process. The same authors pointed to the importance of testing the homogeneity of the flushing charge, i.e. determining whether the drug is homogeneously distributed in the feed. They found that during the first 20 minutes in the production cycle, the flushing charge contained oxytetracycline at concentrations >2.5% of the allowed transfer, or significantly higher than the last part of the produced batch. 
If the flushing charge can be easily flushed, the concentration of the contaminant from the previous batch will be very high at the beginning and will rapidly fall, but in contrast, in the case of slow flushing, the concentration of the given substance will only gradually decrease. These data must be taken into account when taking the first kilogram of this type of feed and giving it to a non-target (for the antibiotic-sensitive) animal species, as well as possible errors in the interpretation of results if a sample of such feed is sent for further analysis. The type of drug (category I or II), the number of animal species for which the drug is intended, and the feed delivery system determine the degree of risk associated with carry-over. Feed plants producing feed only for one type of animal and using only Category I drugs (which do not have a withdrawal period) have the least risk of contamination and the occurrence of residues in tissues of non-target animals. Since there is no withdrawal period for these products, they can be used until animals are slaughtered, and subsequent animal products can immediately be released to the market. In contrast, carry-over of Category II drugs (which have a withdrawal period) into feed could result in the unwanted presence of residues in meat, dairy products and eggs of animals. The carry-over of drugs classified as either Category I or Category II into a batch of feed intended for a species the drug is not intended for can create serious health problems for any such animal consuming the feed. The carry-over of Monensin from cattle feed to horse feed can result in lethal outcomes. The high sensitivity of horses to Monensin is associated with their lack of demethylation enzymes, which facilitate the clearance of Monensin from the animals' systems. The U.S. Food and Drug Administration issued warning letters in 2018 for two feed mills in Minnesota and Nebraska that mixed horse feed containing monensin.
These firms did not adhere to Current Good Manufacturing Practice (CGMP) requirements for medicated feed mills. These incidents of monensin toxicity should be a reminder to all feed producers that make medicated feeds that they must remain vigilant in adhering to CGMP requirements by eliminating unsafe carry-over of medications into feed intended for different species. Guidance for Industry # 72 (GMPs for Medicated Feed Manufacturers Not Required to Register and Being Licensed with FDA) and Guidance for Industry # 235 (Current Good Manufacturing Practice Requirements for Food for Animals) are documents that provide explanation and examples of how to meet the FDA's requirements for safe animal feed production. Carry-over can appear in one segment of the production line or can be a result of a combination of residues along the whole system. In order to discover the cause of carry-over, all equipment must be taken into account, from the place of delivery of the medicine to the loading zone, but carry-over occurring during transport or on the farm itself must also be considered. O'Keeffe et al. identified multiple potential causes for the presence of coccidiostat (Nicarbazine) residues in edible livestock tissues: contamination of feed in mixtures and/or during transport, supply of wrong feed, delivery of feed to the wrong bin on the farm, inadequate cleaning of the feeding system on farms before delivery of replacement feed, inadequately applied withdrawal period for feed with coccidiostatics, poor farm management that led to re-exposure of poultry to Nicarbazine in the period immediately prior to slaughter, and fecal recycling of Nicarbazine from the litter. McEvoy et al. also pointed to the importance of particles of dust and excess material remaining during the pelleting process as an important factor in carry-over. The production practice at one factory was such that material returned to pre-press and contaminated the next production lot. 
The most important sources of carry-over, related to production equipment, are summarized in Table 2. In Good Manufacturing Practice guidance, the main causes of carry-over are the dosing/grinding/mixing line, the press line, and the measurement stations within the lines. This type of carry-over is termed installation carry-over. Equipment Mode of Carry-over Risk management of carry-over When the sources of carry-over are revealed, corrective measures can be taken. The basic principles that feed business operators should establish, implement and maintain are contained in the HACCP system. HACCP principles are largely limited to the ability to carry out the following: (a) Identify any hazards that must be prevented, eliminated or reduced to acceptable levels; (b) Identify the critical control points at the step or steps at which the control is essential to prevent or eliminate a hazard or reduce it to acceptable levels; (c) Establish critical limits at critical control points which separate the acceptability from unacceptability, for the prevention, elimination or reduction of identified hazards; (d) Establish and implement effective monitoring procedures at critical control points; (e) Establish corrective action when monitoring indicates that a critical control point is not under control; (f) Establish procedures to verify that the measures outlined in points (a) to (e) are complete and effective. Verification procedures will be carried out regularly; (g) Establish documents and records commensurate with the nature and size of the feed business to demonstrate the effective application of the measures set out in points (a) to (f). HACCP principles can help feed business operators to achieve a higher standard of feed safety, but should not be considered as a method of self-regulation and do not replace official controls. Each plant must establish its own rules and manage carry-over based on their own HACCP program. 
Although most feed businesses are familiar with ISO 9000, it focuses on systems and procedures. However, HACCP is different, as it focuses on the product. ISO 9000 and similar standards are not an essential requirement for successful HACCP programs. Concrete steps for feed manufacturers to prevent drug carry-over are set by CGMP requirements, and they involve using one or more approved cleanout procedures for the manufacturing equipment. The FDA's CGMP requirements serve as guidelines for medicated feed manufacturers to ensure that their products meet identity, strength, and quality standards. Cleaning the equipment Equipment cleaning is still not widespread in the feed manufacturing industry, but it is potentially the most effective method of avoiding carry-over during processing and delivery of feed. It is mainly applied in high risk situations: dealing with medical premixes; when sequencing can not be included in the production schedule; with portable grinder-mixers; when the physical properties of the drug are such (adhesion strength and electrostatic properties) that sequencing and flushing do not prevent carry-over, and; if liquid ingredients (molasses or fat) are added during the feed mixture production. In all of these cases, physically cleaning the production equipment (cleaning of the mixer, transport system, pellet coolers, bins) or delivery trucks is required. This involves completely stopping production, which is impractical and economically burdensome for the factory. However, GMP stipulates that all equipment should be designed, constructed, installed and maintained in such a way as to facilitate the inspection and use of cleaning procedures. In terms of cleaning efficiency, horizontal mixers have an advantage over vertical ones. A typical cross-section of a double-ribbon horizontal mixer is shown in Figure 1. When the mixer has been emptied, the residual feed will remain in the space between the outer ribbon and the housing.
Some mixers can be adjusted to reduce this space to about 6 mm, which reduces the feed carry-over to an innocuous level in most cases. A typical single screw vertical mixer is shown in Figure 2, along with the location of the mixer discharge. A considerable amount of feed will remain in the mixer after the last feed leaves the discharge opening. If a clean-out opening is provided down on the boot of the mixer, the residual feed can be removed there. If it is not removed, a significant amount (18 kilos or more) of carry-over can pass into the following feed charge. Some plants clean their equipment routinely, e.g. at monthly or bi-annual levels. When employees perform cleaning tasks, it is important to remove all waste and residues before the next production cycle, use safe and approved cleaning agents, use safe and clean tools, pay attention to the hygiene of the personnel involved in the task, and ensure that washing does not disseminate microbial or other contaminants in the equipment. After cleaning, inspection should safely review all available parts of production line (such as mixers, containers, conveyors, etc.) to ensure they are clean and that carry-over will not occur. Inspection should be carried out visually, without entering the mixer or bin. System flushing The flushing procedure involves passing a precise amount of a selected ingredient through the system to flush through any residual medicated feed produced in the previous batch. As a flushing material, grain meals are often used, most commonly ground corn of approximately 600 microns. Other suitable material that has been proven to adequately clean the production line can be used, as can wheat, limestone, and rice hulls. When material passes through the production system, it is mixed with the residual medicated feed from the previous batch, and dilutes the drug concentration to a safe level. 
The quantity of flushing material depends on the system; it usually amounts to about 5-10% of the mixer capacity and should not be less than 90 kilograms. Some tests have shown that flushing material amounting to 1-2.5% of the mixer capacity can be effective in preventing carry-over. Also, the plant should check with mixer's manufacturer for their recommendation for flushing material type, and choose the best option. Due to the degree of variability among facilities, feedmills should determine their facility's individual characteristics and apply appropriate time and volume requirements for flushing material to accomplish the intent of the procedures. The volume used should be stated in the written procedures, and should be based on documented analysis or tests of the firm's system. After the flushing material is added to the mixer, the mixer should be allowed to operate for at least 1 minute before the material is removed. After the mixer is flushed, the material should pass through the whole production system along the same pathway the previously manufactured medicated feed passed. The flushing material, from that moment, must be correctly identified and stored in order to prevent contamination, and later it can be used in the production of the same medicated feed. Some plants use this flushing material to flush trucks for bulk deliveries after they make deliveries to the farm. When applying these procedures, the economic implications of the need to store the flushing material should be taken into account. Some companies choose to simply discard this material in order to avoid subsequent possible production errors, which is certainly the economically least attractive option. Manufacturers must document the applicable flushing procedures: the flushing method, the flushing time, the amount and type of flushing material and the disposal of the flushing material. To monitor flushing, inspection must ensure that feed producers adequately apply their own procedures. 
It is necessary to check that the entire system is flushed (including mixer, conveyors, bins) and to visually verify that no material other than the flushing material is present. Finally, it must be confirmed that the full quantity of flushing material released into the system is recovered at the end of the flushing process. Sequencing procedure The feed industry most often uses sequencing because it minimizes discontinuation of the production line. If properly planned and executed, this method is the most cost-effective carry-over prevention procedure. The order in which the feed is prepared, processed and delivered directly determines the probability of carrying over the drug from one to the next batch and, consequently, the presence of residues in the tissues of the animals that consume such feed. This work plan involves production of all medicated feeds that contain the same drug in the sequence from the highest to the lowest level of the drug. After completion of the last batch of medicated feed, the production of non-medicated feed for the same animal species continues. Examples of accepted principles to be considered when designing the sequencing process are: Withdrawal feed and feed for cattle should not be produced and processed in the same equipment after the manufacture of medicated feed containing Category II drugs, unless appropriate cleaning procedures are applied. Drugs with specific toxicity characteristics, such as Monensin's toxicity for horses, require special attention. After the production of medicated feed containing Category II drugs, feed for the same species that are below the marketable age or weight may be produced. Feeds that have high potential for dangerous drug contamination (feed for withdrawal, dairy animals, etc.) should be produced first in the series, and feed with the most toxic drugs will be the last in the sequencing process, followed by complete physical cleaning of the system. 
10 Sequencing procedures and practices should be clearly understood by all persons responsible for the planning and production of medicated feed. They should be easily accessible for their use. During the sequencing of feed, the age of the animal, the sensitivity to the administered drug, and the type and purpose of the drug should be considered. For example, after production of medicated feed containing oxytetracycline for broilers, non-medicated feed for layers should not be produced. Given that a very small amount of sulfamethazine consumed by pigs can lead to residues of this drug in meat, it is not acceptable to manufacture feed for pigs after the production of medicated feed with sulfamethazine. Feed for pigs containing Carbodox should not be followed by unmedicated feed for gravid sows. After production of Monensin-containing feed, only non-medicated feed for cattle, poultry, or pigs can follow, but not feed for horses. If feed is produced for only one type of animal, such as pigs, the most common medicated feed is for the youngest, most vulnerable category, in this case piglets. In this case, the following order is applied: first, feed containing a drug that requires a withdrawal period for piglets, then sow, grower and finally, finisher feed. Sequencing can also be used to clean containers on trucks, but the same principles should be followed. In most feed factories, feed sequencing procedures will reduce carry-over to a level that eliminates the potential for the presence of residues in animal tissue. However, the sequencing procedure cannot reduce carry-over to a sufficiently low level unless the problems listed in Table 2 have been previously resolved. When sequencing is applied, in order to avoid cross-contamination, precise records of feed production documentation are imperative, so the last batch in a series can always be safely marked. Otherwise, the sequencing procedure could be compromised by the next feed charge preparation. 
Periodic evaluation of sequencing procedure should be carried out to verify and validate their effectiveness. After the application of the described cleaning procedures, if the undesired carry-over of critical additives and VMPs can still be expected, then the company could take up the following measures: draw up a mandatory production (working) sequence; additional measures in the event of product changes; produce feeds with critical additives and VMPs on another line; switch to less critical agents. When carry-over is detected in a feed plant, harmful effects can occur in people and in animals that consume contaminated feed. This type of failure is considered a violation of the regulations, with all consequences for the responsible persons. Based on the Rules on the Establishment of the Feed Safety Monitoring Program for 2018, in Serbia, if the presence of contaminants in feed is detected, activities ranging from corrective measures to prohibition of operation and closure of the entire feed production facility will be implemented. |
package com.greatorator.ddtc.block;
import com.greatorator.ddtc.reference.Reference;
import net.minecraft.block.Block;
import net.minecraft.block.material.Material;
import net.minecraft.client.renderer.texture.IIconRegister;
import net.minecraft.creativetab.CreativeTabs;
import net.minecraft.util.IIcon;
public class MultiTextureBlock extends Block {
    /** One icon per block side (indices 0-5), filled in during icon registration. */
    public IIcon[] icons = new IIcon[6];

    protected MultiTextureBlock(String unlocalizedName, Material material) {
        super(material);
        // Display name and texture prefix are both derived from the unlocalized name,
        // with textures namespaced under this mod's id.
        this.setBlockName(unlocalizedName);
        this.setBlockTextureName(Reference.MOD_ID + ":" + unlocalizedName);
        this.setCreativeTab(CreativeTabs.tabBlock);
        // Stone-like durability with gravel footstep sounds.
        this.setHardness(2.0F);
        this.setResistance(6.0F);
        this.setStepSound(soundTypeGravel);
    }

    @Override
    public IIcon getIcon(int side, int meta) {
        // The texture depends only on which face is queried; metadata is ignored.
        return this.icons[side];
    }

    @Override
    public void registerBlockIcons(IIconRegister reg) {
        // Expects six textures named "<textureName>_0" through "<textureName>_5".
        for (int face = 0; face < this.icons.length; face++) {
            this.icons[face] = reg.registerIcon(this.textureName + "_" + face);
        }
    }
}
|
The Holocaust in Italy
Situation prior to September 8, 1943
At the beginning of the twentieth century, Jews were a well-integrated minority in Italy. They had lived in the country for over two thousand years. After Benito Mussolini seized power in 1922, Italian Jews initially suffered far less persecution in Fascist Italy — if any persecution at all — than the Jews in Nazi Germany in the lead-up to World War II. Some Fascist leaders, such as Achille Starace and Roberto Farinacci, were indeed antisemites, but others, such as Italo Balbo, were not, and until 1938 antisemitism was not the official policy of the party. Like the rest of the Italian population, Jews were divided between fascists and anti-fascists. Some were sympathetic to the regime, joined the party and occupied significant offices and positions in politics and economy (Aldo Finzi, Renzo Ravenna, Margherita Sarfatti, Ettore Ovazza, Guido Jung). Others were active in anti-fascist organizations (Carlo Rosselli, Nello Rosselli, Leone Ginzburg, Umberto Terracini).
In 1938, under the Italian Racial Laws, Italian Jews lost their civil rights, including those to property, education and employment. They were removed from government jobs, the armed forces, and public schools (both as teachers and students). To escape persecution, around 6,000 Italian Jews emigrated to other countries in 1938-39. Among them were intellectuals such as Emilio Segrè, Bruno Rossi, Mario Castelnuovo-Tedesco, Franco Modigliani, Arnaldo Momigliano, Ugo Fano, Robert Fano, and many others. Enrico Fermi also moved to the United States, as his wife was Jewish.
In June 1940, after the outbreak of World War II, the Fascist Italian government opened around 50 concentration camps. These were used predominantly to hold political prisoners but also around 2,200 Jews of foreign nationality (Italian Jews were not interned). The Jews in these camps were treated no differently than political prisoners. While living conditions and food were often basic, prisoners were not subject to violent treatment. The Fascist regime even allowed a Jewish-Italian organization (DELASEM) to operate legally in support of the Jewish internees.
Conditions for non-Jews were much worse. Italian authorities perceived that imprisoned Roma were used to a harsh life, and they received much lower food allowances and more basic accommodation. After the occupation of Greece and Yugoslavia in 1941, Italy opened concentration camps in its occupation zones there. These held a total of up to 150,000 people, mostly Slavs. Living conditions were very harsh, and the mortality rates in these camps far exceeded those of the camps in Italy.
Unlike Jews in other Axis-aligned countries, no Jews in Italy or Italian-occupied areas were murdered or deported to concentration camps in Germany before September 1943. In the territories occupied by the Italian Army in Greece, France and Yugoslavia, Jews even found protection from persecution. The Italian Army actively protected Jews in occupation zones, to the frustration of Nazi Germany, and to the point where the Italian sector in Croatia was referred to as the "Promised Land". Up to September 1943, Germany made no serious attempt to force Mussolini and Fascist Italy into handing over Italian Jews. It was nevertheless irritated with the Italian refusal to arrest and deport its Jewish population, feeling it encouraged other countries allied with the Axis powers to refuse as well.
On July 25, 1943, with the fall of the Fascist Regime and the arrest of Benito Mussolini, the situation in the Italian concentration camps changed. Inmates, including Jewish prisoners, were gradually released. However, this process was not completed by the time German authorities took over the camps in northern-central Italy on September 8, 1943. Luckily, hundreds of Jewish refugees who were imprisoned in the major camps in the South (Campagna internment camp and Ferramonti di Tarsia) were liberated by the Allies before the arrival of the Germans, but 43,000 Jews (35,000 Italians and 8,000 refugees from other countries) were trapped in the territories now under control of the Italian Social Republic.
In general, the fate and persecution of Jews in Italy between 1938 and 1943 has received only very limited attention in the Italian media. Lists of Jews drawn up to enforce the racial laws would be used to round them up after the Italian surrender on September 8, 1943.
The Holocaust in Italy
The murdering of Jews in Italy began on September 8, 1943, after German troops seized control of Northern and Central Italy, freed Benito Mussolini from prison and installed him as the head of the puppet state of the Italian Social Republic.
Organisation
Tasked with overseeing SS operations and, thereby, the final solution, the genocide of the Jews, was SS-Obergruppenführer Karl Wolff, who was appointed as the highest SS and Police leader in Italy. Wolff assembled a group of SS personnel under him with vast experience in the extermination of Jews in Eastern Europe. Odilo Globocnik, appointed as police leader for the coastal area, was responsible for the murder of hundreds of thousands of Jews and Gypsies in Lublin, Poland, before being sent to Italy. Karl Brunner was appointed as SS and police leader in Bolzano, South Tyrol, Willy Tensfeld in Monza for upper and western Italy and Karl-Heinz Bürger was placed in charge of anti-partisan operations.
The security police and the Sicherheitsdienst (SD) came under the command of Wilhelm Harster, based in Verona. He had held the same position in the Netherlands. Theodor Dannecker, previously active in the deportation of Greek Jews in the part of Greece occupied by Bulgaria, was made chief of the Judenreferat of the SD and was tasked with the deportation of the Italian Jews. Not seen as efficient enough, he was replaced by Friedrich Boßhammer, who like Dannecker, was closely associated with Adolf Eichmann.
Martin Sandberger was appointed as the head of the Gestapo in Verona and played a vital role in the arrest and deportation of the Italian Jews.
As in other German-occupied areas, and in the Reich Main Security Office itself, the persecution of the Nazis' undesirable minorities and political opponents fell under Section IV of the Security Police and SD. In turn, Section IV was subdivided into further departments, of which department IV–4b was responsible for Jewish affairs. Dannecker, then Boßhammer headed this department.
The Congress of Verona
The attitude of the Italian Fascists towards Italian Jews changed drastically in November 1943, after the Fascist authorities declared them to be of "enemy nationality" during the Congress of Verona and began to participate actively in the prosecution and arrest of Jews. Initially, after the Italian surrender, the Italian police had only assisted in the round-up of Jews when requested to do so by German authorities. With the Manifest of Verona, in which Jews were declared foreigners, and in times of war enemies, this changed. Police Order No. 5 on November 30, 1943, issued by Guido Buffarini Guidi, minister of the interior of the RSI, ordered the Italian police to arrest Jews and confiscate their property. This order, however, exempted Jews over the age of 70 or of mixed marriages, which frustrated the Germans who wanted to arrest and deport all Italian Jews.
Deportation and murder
The arrest and deportation of Jews in German-occupied Italy can be separated into two distinct phases. The first, under Dannecker, from September 1943 to January 1944, saw mobile Einsatzkommandos target Jews in major Italian cities. The second phase took place under Boßhammer, who had replaced Dannecker in early 1944. Boßhammer set up a centralised persecution system, using all available German and Fascist Italian police resources, to arrest and deport Italian Jews.
The arrest of Jewish Italians and Jewish refugees began almost immediately after the surrender, in October 1943. This took place in all major Italian cities under German control, albeit with limited success. The Italian police offered little cooperation, and ninety percent of Rome's 10,000 Jews escaped arrest. Arrested Jews were taken to the transit camps at Borgo San Dalmazzo, Fossoli and Bolzano, and from there to Auschwitz. Of the 4,800 deported from the camps by the end of 1943 only 314 survived.
Approximately half of all Jews arrested during the Holocaust in Italy were arrested in 1944 by the Italian police.
Altogether, by the end of the war, almost 8,600 Jews from Italy and Italian-controlled areas in France and Greece were deported to Auschwitz; all but 1,000 were murdered. Only 506 were sent to other camps (Bergen-Belsen, Buchenwald, Ravensbrück, and Flossenbürg) as hostages or political prisoners. Among them were a few hundred Jews from Libya, an Italian colony before the war, who had been deported to mainland Italy in 1942, and were sent to Bergen-Belsen concentration camp. Most of them held British and French citizenship and most survived the war.
A further 300 Jews were shot or died of other causes in transit camps in Italy. Of those executed in Italy, almost half were killed at the Ardeatine massacre in March 1944 alone. The 1st SS Panzer Division Leibstandarte SS Adolf Hitler killed over 50 Jewish civilians, refugees and Italian nationals, at the Lake Maggiore massacres—the first massacres of Jews by Germany in Italy during the war. These were committed immediately after the Italian surrender, and the bodies sunk in the lake. This occurred despite strict orders at the time not to commit any violence against the civilian population.
In the nineteen months of German occupation, from September 1943 to May 1945, twenty percent of Italy's pre-war Jewish population was killed by the Nazis. The actual Jewish population in Italy during the war was, however, higher than the initial 40,000 as the Italian government had evacuated 4,000 Jewish refugees from its occupation zones to southern Italy alone. By September 1943, 43,000 Jews were present in northern Italy and, by the end of the war, 40,000 Jews in Italy had survived the Holocaust.
Romani people
Unlike Italian Jews, the Romani people faced discrimination by Fascist Italy almost from the start of the regime. In 1926 it ordered that all "foreign Gypsies" should be expelled from the country and, from September 1940, Romani people of Italian nationality were held in designated camps. With the start of the German occupation many of these camps came under German control. The impact the German occupation had on the Romani people in Italy has seen little research. The number of Romani who died in Italian camps or were deported to concentration camps is uncertain. The number of Romani people who died from hunger and exposure during the Fascist Italian period is also unknown but is estimated to be in the thousands.
While Italy observes January 27 as Remembrance Day for the Holocaust and its Jewish Italian victims, efforts to extend this official recognition to the Italian Romani people killed by the Fascist regime, or deported to extermination camps, have been rejected.
Role of the Catholic Church and the Vatican
Before the Raid of the Ghetto of Rome Germany had been warned that such an action could raise the displeasure of Pope Pius XII, but the pope never spoke out against the deportation of the Jews of Rome during the war, something that has since sparked controversy. At the same time, members of the Catholic Church provided assistance to Jews and helped them survive the Holocaust in Italy.
Looting of Jewish property
Apart from the extermination of the Jews, Nazi Germany was also extremely interested in appropriating Jewish property. A 2010 estimate set the value of Jewish property looted in Italy during the Holocaust between 1943 and 1945 at US$1 billion.
Among the most priceless artifacts lost this way are the contents of the Biblioteca della Comunità Israelitica and the Collegio Rabbinico Italiano, the two Jewish libraries in Rome. Of the former, all of its contents remain missing, while some of the latter's contents were returned after the war.
Weeks before the Raid of the Ghetto of Rome, Herbert Kappler forced Rome's Jewish community to hand over 50 kilograms (110 lb) of gold in exchange for safety. Despite doing so on September 28, 1943, over 1,000 of its members were arrested on October 16 and deported to Auschwitz where all but 16 died.
Perpetrators
Very few German or Italian perpetrators of the Holocaust in Italy were tried or jailed after the war.
Post-war trials
Of the war crimes committed by the Nazis in Italy, the Ardeatine massacre saw arguably the most perpetrators convicted. High-ranking Wehrmacht officials Albert Kesselring, field marshal and commander of all Axis forces in the Mediterranean theatre, Eberhard von Mackensen, commander of the 14th German Army, and Kurt Mälzer, military commander of Rome, were all sentenced to death. They were pardoned and released in 1952; Mälzer died before he could be released. Of the perpetrators from the SS, police chief of Rome Herbert Kappler was sentenced in 1948 but later escaped jail. Erich Priebke and Karl Hass long escaped justice but were eventually tried in 1997.
Theodor Dannecker, in charge of the Judenreferat in Italy, committed suicide after being captured in December 1945, thereby avoiding a possible trial. His successor, Friedrich Boßhammer, disappeared at the end of the war in 1945 and subsequently worked as a lawyer in Wuppertal. He was arrested in West Germany in 1968 and eventually sentenced to life in prison for his involvement in the deportation of 3,300 Jews from Italy to Auschwitz. During the Holocaust almost 8,000 of the 45,000 Jews living in Italy perished. During his trial over 200 witnesses were heard before he was sentenced in April 1972. He died a few months after the verdict without having spent any time in prison.
Karl Friedrich Titho's role as camp commander at the Fossoli di Carpi Transit Camp and the Bolzano Transit Camp in the deportation of Jewish camp inmates to Auschwitz was investigated by the state prosecutor in Dortmund, Germany, in the early 1970s. The investigation was eventually terminated because it could not be proven that Titho knew the Jews deported to Auschwitz would be killed there and that, given the late state of the war, they were killed at all. He was also tried for the execution of 67 prisoners as reprisal for a partisan attack. It was ruled that this did not classify as being murder but, at most, as manslaughter. As such the charge had exceeded the statute of limitations. The two heads of the department investigating Titho had been members of the Nazi Party from an early date.
In 1964, six members of the Leibstandarte division were charged with the Lago Maggiore massacre, carried out near Meina, as the statute of limitation laws in Germany at the time, twenty years for murder, meant the perpetrators could soon no longer be prosecuted. All the accused were found guilty, and three received life sentences for murder. Two others received a jail sentence of three years for having been accessories to the murders, while the sixth one died during the trial. The sentences were appealed and Germany's highest court, the Bundesgerichtshof, while not overturning the guilty verdict, ruled that the perpetrators had to be freed on a technicality. As the crimes had been committed in 1943 and were investigated by the division at that time without a conclusion, the usual start date for the statute of limitations for Nazi crimes, the date of the German surrender in 1945, did not apply. Since the defendants were charged more than twenty years after the 1943 massacre, the statute of limitations had expired.
This verdict caused much frustration for a younger generation of German state prosecutors who were interested in prosecuting Nazi crimes and their perpetrators. The ruling by the Bundesgerichtshof had further repercussions. It stated perpetrators could only be charged with murder if direct involvement in killing could be proven. In any other cases the charge could only be manslaughter. This meant that after 1960, under German law, the statute of limitations for manslaughter crimes had expired.
In 1969 Germany revoked the statute of limitations for murder altogether, allowing direct murder charges to be prosecuted indefinitely. This was not always applied to Nazi war crimes which were judged by pre-1969 laws. Some like Wolfgang Lehnigk-Emden escaped a jail sentence despite having been found guilty in the case of the Caiazzo massacre.
Italian role in the Holocaust
The role of Italians as collaborators of the Germans in the Holocaust in Italy has rarely been reflected upon in the country after World War II. A 2015 book by Simon Levis Sullam, a professor of modern history at the Ca' Foscari University of Venice, titled The Italian Executioners: The Genocide of the Jews of Italy examined the role of Italians in the genocide and found half of the Italian Jews killed in the Holocaust were arrested by Italians and not Germans. Many of these arrests could only be carried out because of tip-offs by civilians. Sullam argued that Italy ignored what he called its "era of the executioner", rehabilitated Italian participants in the Holocaust through a 1946 amnesty, and continued to focus on its role as saviours of the Jews rather than to reflect on the persecution Jews suffered in Fascist Italy.
Michele Sarfatti, one of the most important historians of Italian Jewry in the country, stated his belief that, up until the 1970s, Italians generally believed their country was not involved in the Holocaust, and that it was exclusively the work of the German occupiers instead. This only began to change in the 1990s after the publication of Il Libro Della Memoria by Jewish-Italian historian Liliana Picciotto, and of the Italian Racial Laws in book form in the early 2000s. These publications highlighted the fact that Italy's anti-Semitic laws were distinctly independent from those in Nazi Germany and, in some instances, more severe than the early anti-Semitic laws Germany had enacted.
Memoriale della Shoah
The Memoriale della Shoah is a Holocaust memorial in Milano Centrale railway station, dedicated to the Jewish people deported from a secret platform underneath the station to the extermination camps. It was opened in January 2013.
Borgo San Dalmazzo camp
No trace remains of the former Borgo San Dalmazzo concentration camp, but two monuments were erected to mark the events that took place there. In 2006 a memorial was erected at the Borgo San Dalmazzo railway station to commemorate the deportations. The memorial contains the names, ages and countries of origin of the victims as well as those of the few survivors. It also has some freight cars of the type used in the deportations.
Fossoli Camp
In 1996 a foundation was formed to preserve the former camp. From 1998 to 2003 volunteers rebuilt the fencing around the Campo Nuovo and, in 2004, one of the barracks that was used to house Jewish inmates was reconstructed.
Italian Righteous Among the Nations
As of 2018, 694 Italians have been recognised as Righteous Among the Nations, an honorific used by the State of Israel to describe non-Jews who risked their lives during the Holocaust to save Jews from extermination by the Nazis.
The first Italians to be honoured in this fashion were Don Arrigo Beccari, Doctor Giuseppe Moreali and Ezio Giorgetti in 1964. Arguably the most famous of these is cyclist Gino Bartali, winner of the 1938 and 1948 Tour de France, who was honoured posthumously for his role in saving Italian Jews during the Holocaust in 2014, never having spoken about it during his lifetime. |
Senior Communist Party of China (CPC) official Liu Yunshan said Wednesday that the party's "mass line" education campaign should follow strict standards and allow no formalism.
Liu, a member of the Standing Committee of the Political Bureau of the CPC Central Committee, made the remarks at a two-day meeting concerning supervision for the campaign.
The "mass line" refers to a guideline under which CPC officials and members are required to strengthen ties with the public and work for the people's interests.
Liu, also head of the campaign's leading team, said inspectors should discover potential problems in their work to ensure that the campaign will create real and satisfactory results.
Inspectors should use strict standards and discipline to make sure the campaign is properly implemented, he said.
Inspectors should not attend banquets, accept gifts or visit tourist sites during their inspections, Liu said.
Zhao Leji, deputy head of the campaign's leading team, said the inspectors must be truthful, communicate in a timely way and take their duties seriously.
Members of the leading team, as well as representatives from 45 central supervisory teams, attended Wednesday's meeting. |
Thracian Mounds in Bulgaria: Heritage at Risk Abstract This paper illustrates the current state of Thracian mounds in Bulgaria, reviewing its theory and practice from a new heritage perspective, grounded in the field of Heritage studies. It points out the critical need of reversing the present state of mis-protection and mismanagement that results in cultural disinheritance, mass looting and destruction. It is argued that the preservation and development of Thracian sites and landscapes is enhanced by the ongoing heritization process of ascribing multiple values, wider definitions and roles of cultural heritage in society. This recently acquired heritage significance has activated a growing awareness and interest on the side of scientists from diverse fields such as archaeology and heritage studies, spatial planning and geography, medicine and biology, genetic studies, ethnography, linguistics and religious studies, claiming their right to explore and contribute new evidence of the historical, cultural and ethnic continuity between ancient Thracians and present day Bulgarians. Due to the increasing integration of sciences and synthesis of knowledge, links in value systems, toponyms, language, folklore, traditions and rituals, as well as genetics, become more visible, promising to restore a lost collective identity and sustain a valuable ancestral heritage. |
Why might they be giants? Towards an understanding of polar gigantism Summary Beginning with the earliest expeditions to the poles, over 100 years ago, scientists have compiled an impressive list of polar taxa whose body sizes are unusually large. This phenomenon has become known as polar gigantism. In the intervening years, biologists have proposed a multitude of hypotheses to explain polar gigantism. These hypotheses run the gamut from invoking release from physical and physiological constraints, to systematic changes in developmental trajectories, to community-level outcomes of broader ecological and evolutionary processes. Here we review polar gigantism and emphasize two main problems. The first is to determine the true strength and generality of this pattern: how prevalent is polar gigantism across taxonomic units? Despite many published descriptions of polar giants, we still have a poor grasp of whether these species are unusual outliers or represent more systematic shifts in distributions of body size. Indeed, current data indicate that some groups show gigantism at the poles whereas others show nanism. The second problem is to identify underlying mechanisms or processes that could drive taxa, or even just allow them, to evolve especially large body size. The contenders are diverse and no clear winner has yet emerged. Distinguishing among the contenders will require better sampling of taxa in both temperate and polar waters and sustained efforts by comparative physiologists and evolutionary ecologists in a strongly comparative framework. |
//
// Created by <NAME> on 2021-07-12.
//
#include "RESTAPI_FMSObjects.h"
#include "framework/MicroService.h"
using OpenWifi::RESTAPI_utils::field_to_json;
using OpenWifi::RESTAPI_utils::field_from_json;
namespace OpenWifi::FMSObjects {
// Serialize every Firmware field into the supplied Poco JSON object.
// Field names mirror the REST API schema one-to-one.
void Firmware::to_json(Poco::JSON::Object &Obj) const {
    field_to_json(Obj, "id", id);
    field_to_json(Obj, "release", release);
    field_to_json(Obj, "deviceType", deviceType);
    field_to_json(Obj, "description", description);
    field_to_json(Obj, "revision", revision);
    field_to_json(Obj, "uri", uri);
    field_to_json(Obj, "image", image);
    field_to_json(Obj, "imageDate", imageDate);
    field_to_json(Obj, "size", size);
    field_to_json(Obj, "downloadCount", downloadCount);
    field_to_json(Obj, "firmwareHash", firmwareHash);
    field_to_json(Obj, "owner", owner);
    field_to_json(Obj, "location", location);
    field_to_json(Obj, "uploader", uploader);
    field_to_json(Obj, "digest", digest);
    field_to_json(Obj, "latest", latest);
    field_to_json(Obj, "notes", notes);
    field_to_json(Obj, "created", created);
} // NOTE: stray ';' after this brace removed (empty declaration, -Wextra-semi)
// Populate this Firmware from a Poco JSON object.
// Returns true on success, false if any field fails to parse (a Poco JSON
// exception is thrown by field_from_json on type/format mismatch).
bool Firmware::from_json(const Poco::JSON::Object::Ptr &Obj) {
    try {
        field_from_json(Obj, "id", id);
        field_from_json(Obj, "release", release);
        field_from_json(Obj, "deviceType", deviceType);
        field_from_json(Obj, "description", description);
        field_from_json(Obj, "revision", revision);
        field_from_json(Obj, "uri", uri);
        field_from_json(Obj, "image", image);
        field_from_json(Obj, "imageDate", imageDate);
        field_from_json(Obj, "size", size);
        field_from_json(Obj, "downloadCount", downloadCount);
        field_from_json(Obj, "firmwareHash", firmwareHash);
        field_from_json(Obj, "owner", owner);
        field_from_json(Obj, "location", location);
        field_from_json(Obj, "uploader", uploader);
        field_from_json(Obj, "digest", digest);
        field_from_json(Obj, "latest", latest);
        field_from_json(Obj, "notes", notes);
        field_from_json(Obj, "created", created);
        return true;
    } catch (...) {
        // fall through and report failure
    }
    // BUGFIX: previously returned true here, so a failed parse was reported
    // as success; every sibling from_json in this file returns false.
    return false;
}
// Emit the wrapped firmware collection under the "firmwares" key.
void FirmwareList::to_json(Poco::JSON::Object &Obj) const {
    field_to_json(Obj, "firmwares", firmwares);
}
// Parse the "firmwares" array out of the given JSON object.
// Returns false if the payload is malformed (parse throws), true otherwise.
bool FirmwareList::from_json(const Poco::JSON::Object::Ptr &Obj) {
    try {
        field_from_json(Obj, "firmwares", firmwares);
    } catch (...) {
        return false;
    }
    return true;
}
// Serializes this device type definition into the given JSON object.
void DeviceType::to_json(Poco::JSON::Object &Obj) const {
	field_to_json(Obj, "id", id);
	field_to_json(Obj, "deviceType", deviceType);
	field_to_json(Obj, "manufacturer", manufacturer);
	field_to_json(Obj, "model", model);
	field_to_json(Obj, "policy", policy);
	field_to_json(Obj, "notes", notes);
	field_to_json(Obj, "lastUpdate", lastUpdate);
	field_to_json(Obj, "created", created);
	// fix: removed three duplicated "id" serializations (copy-paste leftovers);
	// the key was already written on the first line.
}
// Populates this device type definition from JSON.
// Returns true on success, false if any field fails to parse.
bool DeviceType::from_json(const Poco::JSON::Object::Ptr &Obj) {
	try {
		field_from_json(Obj, "id", id);
		field_from_json(Obj, "deviceType", deviceType);
		field_from_json(Obj, "manufacturer", manufacturer);
		field_from_json(Obj, "model", model);
		field_from_json(Obj, "policy", policy);
		field_from_json(Obj, "notes", notes);
		field_from_json(Obj, "lastUpdate", lastUpdate);
		field_from_json(Obj, "created", created);
		// fix: removed three duplicated "id" parses (copy-paste leftovers).
		return true;
	} catch (...) {
	}
	return false;
}
// Serializes the wrapped device-type collection under the "deviceTypes" key.
void DeviceTypeList::to_json(Poco::JSON::Object &Obj) const {
	field_to_json(Obj,"deviceTypes", deviceTypes);
}
// Restores the device-type collection from JSON; returns false if parsing throws.
bool DeviceTypeList::from_json(const Poco::JSON::Object::Ptr &Obj) {
	try {
		field_from_json(Obj,"deviceTypes", deviceTypes);
		return true;
	} catch(...) {
	}
	return false;
}
// Serializes one firmware-upgrade history record for a device.
void RevisionHistoryEntry::to_json(Poco::JSON::Object &Obj) const {
	field_to_json(Obj, "id", id);
	field_to_json(Obj, "serialNumber", serialNumber);
	field_to_json(Obj, "fromRelease", fromRelease);
	field_to_json(Obj, "toRelease", toRelease);
	field_to_json(Obj, "commandUUID", commandUUID);
	field_to_json(Obj, "revisionId", revisionId);
	field_to_json(Obj, "upgraded", upgraded);
}
// Restores one history record from JSON; returns false if parsing throws.
bool RevisionHistoryEntry::from_json(const Poco::JSON::Object::Ptr &Obj) {
	try {
		field_from_json(Obj, "id", id);
		field_from_json(Obj, "serialNumber", serialNumber);
		field_from_json(Obj, "fromRelease", fromRelease);
		field_from_json(Obj, "toRelease", toRelease);
		field_from_json(Obj, "commandUUID", commandUUID);
		field_from_json(Obj, "revisionId", revisionId);
		field_from_json(Obj, "upgraded", upgraded);
		return true;
	} catch(...) {
	}
	return false;
}
// Serializes the history collection.
// NOTE(review): the JSON key is "deviceTypes", not "history" — this looks like
// a copy-paste from DeviceTypeList.  to_json and from_json are at least
// consistent with each other, so renaming the key would change the wire
// format; confirm with API consumers before fixing.
void RevisionHistoryEntryList::to_json(Poco::JSON::Object &Obj) const {
	field_to_json(Obj,"deviceTypes", history);
}
// Restores the history collection (same questionable key as above).
bool RevisionHistoryEntryList::from_json(const Poco::JSON::Object::Ptr &Obj) {
	try {
		field_from_json(Obj,"deviceTypes", history);
		return true;
	} catch(...) {
	}
	return false;
}
// Serializes the "how old is this device's firmware" summary.
void FirmwareAgeDetails::to_json(Poco::JSON::Object &Obj) const {
	field_to_json(Obj,"latestId", latestId);
	field_to_json(Obj,"image", image);
	field_to_json(Obj,"imageDate", imageDate);
	field_to_json(Obj,"revision", revision);
	field_to_json(Obj,"uri", uri);
	field_to_json(Obj,"age", age);
	field_to_json(Obj,"latest",latest);
}
// Restores the age summary from JSON; returns false if parsing throws.
bool FirmwareAgeDetails::from_json(const Poco::JSON::Object::Ptr &Obj) {
	try {
		field_from_json(Obj,"latestId", latestId);
		field_from_json(Obj,"image", image);
		field_from_json(Obj,"imageDate", imageDate);
		field_from_json(Obj,"revision", revision);
		field_from_json(Obj,"uri", uri);
		field_from_json(Obj,"age", age);
		field_from_json(Obj,"latest", latest);
		return true;
	} catch(...) {
	}
	return false;
}
// Serializes a device's current connection snapshot (who, what firmware,
// which gateway endpoint, and when it was last seen).
void DeviceConnectionInformation::to_json(Poco::JSON::Object &Obj) const {
	field_to_json(Obj, "serialNumber", serialNumber);
	field_to_json(Obj, "revision", revision);
	field_to_json(Obj, "deviceType", deviceType);
	field_to_json(Obj, "endPoint", endPoint);
	field_to_json(Obj, "lastUpdate", lastUpdate);
	field_to_json(Obj, "status", status);
}
// Restores the connection snapshot from JSON; returns false if parsing throws.
bool DeviceConnectionInformation::from_json(const Poco::JSON::Object::Ptr &Obj) {
	try {
		field_from_json(Obj, "serialNumber", serialNumber);
		field_from_json(Obj, "revision", revision);
		field_from_json(Obj, "deviceType", deviceType);
		field_from_json(Obj, "endPoint", endPoint);
		field_from_json(Obj, "lastUpdate", lastUpdate);
		field_from_json(Obj, "status", status);
		return true;
	} catch(...) {
	}
	return false;
}
// Serializes the aggregated fleet report.  Members use a trailing-underscore
// convention; the JSON keys stay lower camel case.
void DeviceReport::to_json(Poco::JSON::Object &Obj) const {
	field_to_json(Obj, "ouis",OUI_);
	field_to_json(Obj, "revisions", Revisions_);
	field_to_json(Obj, "deviceTypes", DeviceTypes_);
	field_to_json(Obj, "status", Status_);
	field_to_json(Obj, "endPoints", EndPoints_);
	field_to_json(Obj, "usingLatest", UsingLatest_);
	field_to_json(Obj, "unknownFirmwares", UnknownFirmwares_);
	field_to_json(Obj,"snapshot",snapshot);
	field_to_json(Obj,"numberOfDevices",numberOfDevices);
	field_to_json(Obj, "totalSecondsOld", totalSecondsOld_);
}
// Clears every aggregate bucket and stamps the report with the current
// wall-clock time, ready for a fresh accumulation pass.
void DeviceReport::reset() {
	OUI_.clear();
	Revisions_.clear();
	DeviceTypes_.clear();
	Status_.clear();
	EndPoints_.clear();
	UsingLatest_.clear();
	UnknownFirmwares_.clear();
	totalSecondsOld_.clear();
	numberOfDevices = 0 ;
	snapshot = std::time(nullptr);  // seconds since epoch
}
// Deserialization is intentionally not implemented for reports (they are
// produced, never consumed).  Always reports success.
// fix: removed the dead try/catch — the body could not throw, so the
// `return false` path was unreachable.
bool DeviceReport::from_json([[maybe_unused]] const Poco::JSON::Object::Ptr &Obj) {
	return true;
}
// Serializes the per-device firmware overview (current vs. latest firmware,
// plus the full upgrade history).
void DeviceInformation::to_json(Poco::JSON::Object &Obj) const {
	field_to_json(Obj, "serialNumber",serialNumber);
	field_to_json(Obj, "history", history);
	field_to_json(Obj, "currentFirmware", currentFirmware);
	field_to_json(Obj, "currentFirmwareDate", currentFirmwareDate);
	field_to_json(Obj, "latestFirmware", latestFirmware);
	field_to_json(Obj, "latestFirmwareDate", latestFirmwareDate);
	field_to_json(Obj, "latestFirmwareAvailable",latestFirmwareAvailable);
	field_to_json(Obj, "latestFirmwareURI",latestFirmwareURI);
}
// Restores the per-device overview from JSON; returns false if parsing throws.
bool DeviceInformation::from_json(const Poco::JSON::Object::Ptr &Obj) {
	try {
		field_from_json(Obj, "serialNumber",serialNumber);
		field_from_json(Obj, "history", history);
		field_from_json(Obj, "currentFirmware", currentFirmware);
		field_from_json(Obj, "currentFirmwareDate", currentFirmwareDate);
		field_from_json(Obj, "latestFirmware", latestFirmware);
		field_from_json(Obj, "latestFirmwareDate", latestFirmwareDate);
		field_from_json(Obj, "latestFirmwareAvailable",latestFirmwareAvailable);
		field_from_json(Obj, "latestFirmwareURI",latestFirmwareURI);
		return true;
	} catch(...) {
	}
	return false;
}
} // namespace OpenWifi::FMSObjects
|
Samsung announced its new flagship smartphone Monday with feature improvements aimed at making its devices more indispensable to daily life, rather than adding crazy new features to make its devices stand out.
The Galaxy S5, introduced at a news conference at the Mobile World Congress, sports a new look and some new colors such as blue and gold. But it didn’t deliver on the craziest hopes that Samsung fans had for the phone. In the news conference, the company said that it did, however, shore up the key features that mattered most to its customers. These include improvements to the camera, which now has a 16-megapixel sensor, as well as making improvements to the phone’s speed and battery life.
These practical improvements are meant to solve everyday problems, such as that horrible sinking feeling you get when your phone dips below a 10 percent charge. Even in that situation, the company boasted that the S5 is able to last another 24 hours in standby mode. That leaves you a lot of time to find an outlet and enough juice to get an emergency call or text from your friends and family.
The S5 is also water-resistant — you can stand a rain shower, though not a scuba-diving session — and also has a fingerprint scanner. Users can scan their digits to unlock the phone, gain access to a separate section of the phone used for personal files and photos and, in some cases, as validation for mobile payment.
Design-wise, Samsung has changed the back of the smartphone to a new, perforated texture that looks kind of like an adhesive bandage.
Samsung also increased its focus on making its devices into workout companions. In the S5, that means the inclusion of a heart-rate monitor — something that’s also included in its next-generation wearable products, which were also revealed Monday.
An all-new wearable device, the Gear Fit, sports a curved screen that hugs your wrist and finally produces a practical application for Samsung’s much-anticipated flexible screens. The Gear Fit is more like a Pebble smartwatch than the Galaxy Gear: It can get notifications from your phone, but it doesn’t have a microphone or speakers to act as a call conduit. Its long, thin 1.84-inch screen will display updates from the device’s heart monitor and pedometer.
Samsung also updated its smartwatch line to be more stylish, and not just because it dropped the “Galaxy” from the line’s name. The Gear 2 and Gear 2 Neo are both a bit more sensible than the Galaxy Gear, given that they don’t have any wiring in the band. That opens Samsung to offer more fashionable bands for its watches. That’s a design option that cuts down on the watches’ bulk and can only work in the watches’ favor.
The Gear Neo does not have a camera, which should help with the devices’ bulkiness as well. All three devices have a longer battery life than the 24-hour range on the Galaxy Gear — another key improvement for Samsung if it’s looking to pick up adoption.
The wearable devices all run Samsung’s Tizen rather than Google’s Android mobile operating system. This isn’t as much of a problem as it may seem for the app development world, since there weren’t that many Android apps built to run on the Galaxy Gear. Still, Samsung will have to be aggressive about adding useful apps if it wants to be at the center of the smartwatch revolution.
/* ************************************************************************** */
/* */
/* ::: :::::::: */
/* init_bonus.c :+: :+: :+: */
/* +:+ +:+ +:+ */
/* By: jofelipe <<EMAIL>> +#+ +:+ +#+ */
/* +#+#+#+#+#+ +#+ */
/* Created: 2021/12/27 21:28:32 by jofelipe #+# #+# */
/* Updated: 2022/01/23 12:01:32 by jofelipe ### ########.fr */
/* */
/* ************************************************************************** */
#include "philo_bonus.h"
/*
** Allocates and fills the shared simulation arguments from the command line.
** argv[1..4] are: number of philosophers, time to die, time to eat and time
** to sleep (all parsed with ft_atoi).  Returns NULL on allocation failure
** (fix: both malloc() results were previously unchecked, so an OOM would
** dereference NULL further down the call chain).
*/
t_args *init_args(int argc, char **argv)
{
	t_args	*args;

	args = malloc(sizeof(t_args));
	if (!args)
		return (NULL);
	args->argc = argc;
	args->simulation_done = false;
	args->max_philo = ft_atoi(argv[1]);
	args->time_to_die = ft_atoi(argv[2]);
	args->time_to_eat = ft_atoi(argv[3]);
	args->time_to_sleep = ft_atoi(argv[4]);
	args->pids = malloc(sizeof(int) * args->max_philo);
	if (!args->pids)
	{
		free(args);
		return (NULL);
	}
	return (args);
}
/*
** Allocates one philosopher record (seat i, ids are 1-based) and wires it to
** the shared args.  When a 6th program argument exists it is the maximum
** number of meals; otherwise max_meals is effectively unlimited (INT_MAX).
** Note: meals_arg/max_meals live in the shared args, so they are re-assigned
** (to the same values) once per philosopher.  Returns NULL on allocation
** failure (fix: malloc() was previously unchecked).
*/
t_philo *init_philo(char **argv, t_args *args, t_philo *philo, int i)
{
	philo = malloc(sizeof(t_philo));
	if (!philo)
		return (NULL);
	philo->id = i + 1;
	philo->meals = 0;
	philo->dead = false;
	philo->args = args;
	if (args->argc == 6)
	{
		philo->args->meals_arg = true;
		philo->args->max_meals = ft_atoi(argv[5]);
	}
	else
	{
		philo->args->meals_arg = false;
		philo->args->max_meals = 2147483647;
	}
	return (philo);
}
/*
** Opens the named semaphores shared by all philosopher processes:
** "farol" (the forks, one slot per philosopher), "msg" (serializes log
** output), "table" (limits how many philosophers may sit at once) and
** "finish" (end-of-simulation signal).
** NOTE(review): POSIX specifies that named-semaphore names begin with '/'
** ("/farol", ...); unprefixed names are implementation-defined — confirm on
** the target OS.  sem_open() failures (SEM_FAILED) are not checked, and
** "table" is opened with value ft_atoi(argv[1]) - 2, which is <= 0 for
** fewer than three philosophers — verify that intent with the caller.
*/
void init_sem(t_sem *sem, char **argv)
{
	sem->forks = sem_open("farol", O_CREAT, 0777, ft_atoi(argv[1]));
	sem->msgs = sem_open("msg", O_CREAT, 0777, 1);
	sem->table = sem_open("table", O_CREAT, 0777, ft_atoi(argv[1]) - 2);
	sem->finish = sem_open("finish", O_CREAT, 0777, 1);
}
/*
** Top-level initializer: opens the shared semaphores, parses the shared
** arguments and allocates one t_philo per seat, each pointing at the same
** t_sem.  Returns the (caller-provided) philo array.
** NOTE(review): malloc() and the init_* results are not checked here, and
** stale semaphores are not sem_unlink()ed before sem_open(), so a previous
** crashed run can leave them at old values — verify cleanup elsewhere.
*/
t_philo **init(t_philo **philo, int argc, char **argv)
{
	t_args *args;
	t_sem *sem;
	int i;

	sem = malloc(sizeof(t_sem));
	init_sem(sem, argv);
	args = init_args(argc, argv);
	i = -1;
	while (++i < args->max_philo)
	{
		philo[i] = init_philo(argv, args, philo[i], i);
		philo[i]->sem = sem;
	}
	return (philo);
}
|
The role of basic blood parameters in determining the viability of intestinal tissue in incarcerated hernias Abdominal hernia repair is a common surgery, with incarcerated hernias accounting for 15% of all cases. In these cases, early diagnosis of intestinal ischaemia and necrosis is crucial to prevent mortality and morbidity. Biomarkers that can predict ischaemic or necrotic status are of vital importance. The aim of this study was to reveal the roles of basic blood parameters in determining ischaemic or necrotic status. |
<gh_stars>10-100
package com.ctrip.zeus.service.build.impl;
import com.ctrip.zeus.dao.entity.NginxConf;
import com.ctrip.zeus.dao.entity.NginxConfExample;
import com.ctrip.zeus.dao.entity.NginxConfSlb;
import com.ctrip.zeus.dao.entity.NginxConfSlbExample;
import com.ctrip.zeus.dao.mapper.NginxConfMapper;
import com.ctrip.zeus.dao.mapper.NginxConfSlbMapper;
import com.ctrip.zeus.model.model.*;
import com.ctrip.zeus.model.nginx.ConfFile;
import com.ctrip.zeus.model.nginx.NginxConfEntry;
import com.ctrip.zeus.model.nginx.Upstreams;
import com.ctrip.zeus.model.nginx.Vhosts;
import com.ctrip.zeus.service.build.BuildInfoService;
import com.ctrip.zeus.service.build.BuildService;
import com.ctrip.zeus.service.build.NginxConfBuilder;
import com.ctrip.zeus.service.version.ConfVersionService;
import com.ctrip.zeus.support.ObjectJsonParser;
import com.ctrip.zeus.support.ObjectJsonWriter;
import com.ctrip.zeus.util.CompressUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import javax.annotation.Resource;
import java.util.*;
/**
* @author:xingchaowang
* @date: 3/15/2015.
*/
@Service("buildService")
public class BuildServiceImpl implements BuildService {
    @Resource
    private BuildInfoService buildInfoService;      // hands out monotonically increasing build tickets per SLB
    @Resource
    private ConfVersionService confVersionService;  // tracks which conf version is currently live
    @Resource
    private NginxConfBuilder nginxConfigBuilder;    // renders nginx.conf / vhost / upstream text
    @Resource
    private NginxConfMapper nginxConfMapper;
    @Resource
    private NginxConfSlbMapper nginxConfSlbMapper;
    private Logger logger = LoggerFactory.getLogger(this.getClass());

    /**
     * Builds and persists a new nginx configuration version for one SLB.
     * Only virtual servers in {@code buildingVsIds} are re-rendered; vhost and
     * upstream files belonging to untouched virtual servers are carried over
     * verbatim from the currently live conf entry, and anything in
     * {@code clearingVsIds} is dropped.  The merged entry is compressed and
     * stored under a freshly issued version number, which is returned.
     */
    @Override
    public Long build(Slb nxOnlineSlb, Map<Long, VirtualServer> nxOnlineVses, Set<Long> buildingVsIds,
                      Set<Long> clearingVsIds, Map<Long, List<TrafficPolicy>> policiesByVsId, Map<Long, List<Group>> groupsByVsId,
                      Map<Long, Map<Long, Map<Long, Integer>>> drDesSlbByGvses, Map<Long, Dr> drByGroupIds,
                      Set<String> serversToBeMarkedDown, Set<String> groupMembersToBeMarkedUp, Map<Long, String> canaryIpMap, List<Rule> defaultRules) throws Exception {
        // Reserve the next version number, then render and upsert the main nginx.conf.
        int version = buildInfoService.getTicket(nxOnlineSlb.getId());
        Long currentVersion = confVersionService.getSlbCurrentVersion(nxOnlineSlb.getId());
        String conf = nginxConfigBuilder.generateNginxConf(nxOnlineSlb, defaultRules, null);
        NginxConf nginxConf = new NginxConf();
        nginxConf.setSlbId(nxOnlineSlb.getId());
        nginxConf.setVersion(version);
        nginxConf.setContent(conf);
        nginxConf.setDatachangeLasttime(new Date());
        nginxConfMapper.upsertSelective(nginxConf);
        logger.info("Nginx Conf build success! slbId: " + nxOnlineSlb.getId() + ", version: " + version);
        // init current conf entry in case of generating conf file for entirely new cluster
        NginxConfEntry currentConfEntry = new NginxConfEntry().setUpstreams(new Upstreams()).setVhosts(new Vhosts());
        NginxConfSlb d = nginxConfSlbMapper.selectOneByExampleWithBLOBs(new NginxConfSlbExample().createCriteria().andSlbIdEqualTo(nxOnlineSlb.getId())
                .andVersionEqualTo(currentVersion).example());
        if (d != null) {
            // Entries are stored compressed; decompress before JSON-parsing.
            currentConfEntry = ObjectJsonParser.parse(CompressUtils.decompress(d.getContent()), NginxConfEntry.class);
        }
        NginxConfEntry nextConfEntry = new NginxConfEntry().setUpstreams(new Upstreams()).setVhosts(new Vhosts());
        Set<String> fileTrack = new HashSet<>();
        logger.info("[Model Snapshot Test]Start Build Nginx Conf VsIDs:" + buildingVsIds.toString());
        // Phase 1: render fresh vhost + upstream files for every vs being (re)built,
        // skipping those that are simultaneously being cleared.
        for (Long vsId : buildingVsIds) {
            if (clearingVsIds.contains(vsId)) {
                continue;
            }
            VirtualServer virtualServer = nxOnlineVses.get(vsId);
            List<Group> groups = groupsByVsId.get(vsId);
            if (groups == null) {
                groups = new ArrayList<>();
            }
            logger.info("[Model Snapshot Test]Build Server Conf:" + vsId + ":Groups:" + ObjectJsonWriter.write(groups));
            String serverConf = nginxConfigBuilder.generateServerConf(nxOnlineSlb, virtualServer, policiesByVsId.get(vsId), groups, drDesSlbByGvses.get(vsId), drByGroupIds, canaryIpMap, defaultRules, null);
            nextConfEntry.getVhosts().addConfFile(new ConfFile().setName("" + virtualServer.getId()).setContent(serverConf));
            logger.info("[Model Snapshot Test]Finished Server Conf:");
            // fileTrack de-duplicates upstream files shared between virtual servers.
            List<ConfFile> list = nginxConfigBuilder.generateUpstreamsConf(nxOnlineVses.keySet(), virtualServer, groups, serversToBeMarkedDown, groupMembersToBeMarkedUp, fileTrack, defaultRules, nxOnlineSlb);
            for (ConfFile cf : list) {
                nextConfEntry.getUpstreams().addConfFile(cf);
            }
            logger.info("[Model Snapshot Test]Finished Build Upstream Conf:");
        }
        // Phase 2: carry over vhost files of virtual servers that were neither
        // rebuilt nor cleared (vhost file name == vs id).
        for (ConfFile cf : currentConfEntry.getVhosts().getFiles()) {
            try {
                Long vsId = Long.parseLong(cf.getName());
                if (clearingVsIds.contains(vsId) || buildingVsIds.contains(vsId)) {
                    continue;
                } else {
                    nextConfEntry.getVhosts().addConfFile(cf);
                }
            } catch (NumberFormatException ex) {
                logger.error("Unable to extract vs id information from vhost file: " + cf.getName() + ".");
            }
        }
        // Phase 3: carry over upstream files.  An upstream file name is an
        // underscore-separated list of the vs ids that reference it; keep the
        // file only if none of those ids was rebuilt or cleared (and every id
        // parsed cleanly).
        for (ConfFile cf : currentConfEntry.getUpstreams().getFiles()) {
            String[] fn = cf.getName().split("_");
            boolean add = true;
            for (String relatedVsId : fn) {
                if (relatedVsId.isEmpty()) continue;
                Long vsId = 0L;
                try {
                    vsId = Long.parseLong(relatedVsId);
                } catch (NumberFormatException ex) {
                    add = false;
                    logger.warn("Unable to extract vs id information from upstream file: " + cf.getName() + ".");
                    continue;
                }
                if (clearingVsIds.contains(vsId) || buildingVsIds.contains(vsId)) {
                    if (add) add = false;
                }
            }
            if (add) {
                nextConfEntry.getUpstreams().addConfFile(cf);
            }
        }
        // Persist the merged entry (compressed JSON) under the new version.
        logger.info("[Model Snapshot Test]Start Insert Nginx Conf To DB");
        NginxConfSlb toInsert = new NginxConfSlb();
        toInsert.setContent(CompressUtils.compress(ObjectJsonWriter.write(nextConfEntry)));
        toInsert.setSlbId(nxOnlineSlb.getId());
        toInsert.setVersion((long) version);
        nginxConfSlbMapper.insert(toInsert);
        logger.info("[Model Snapshot Test]Finish Insert Nginx Conf To DB");
        return (long) version;
    }

    /**
     * Discards every persisted conf row (both nginx.conf and the per-SLB
     * entry blobs) newer than {@code version}, effectively rolling the SLB
     * back to that version.
     */
    @Override
    public void rollBackConfig(Long slbId, int version) throws Exception {
        nginxConfMapper.deleteByExample(new NginxConfExample().createCriteria().andSlbIdEqualTo(slbId).andVersionGreaterThan(version).example());
        nginxConfSlbMapper.deleteByExample(new NginxConfSlbExample().createCriteria().andSlbIdEqualTo(slbId).andVersionGreaterThan((long) version).example());
    }
}
|
<filename>algorithm/redis/LRUCache_test.go<gh_stars>1-10
package redis
import (
"github.com/sirupsen/logrus"
"testing"
)
func TestLRUCache(t *testing.T) {
	// Capacity is 3, so every Set beyond the third is expected to evict the
	// least-recently-used entry.  (Original comments said "evicts 1" three
	// times; with no intervening Gets, the evictions should be 1, 2, 3.)
	LRUCache := NewLRUCache(3)
	LRUCache.Set(1, 11)
	LRUCache.Set(2, 22)
	LRUCache.Set(3, 33)
	LRUCache.Set(4, 44) // expected to evict key 1
	LRUCache.Set(5, 55) // expected to evict key 2
	LRUCache.Set(6, 66) // expected to evict key 3
	// NOTE(review): the test only logs the surviving entries; it never
	// asserts, so it cannot fail.  Consider asserting on LRUCache.m instead.
	for _, v := range LRUCache.m {
		logrus.Printf("key:%v value:%v", v.key, v.value)
	}
}
|
package com.ilm9001.beatmapvisualiser.ShowElements;
import com.ilm9001.beatmapvisualiser.BeatMapVisualiser;
import org.bukkit.Location;
import org.bukkit.Material;
import org.bukkit.entity.ArmorStand;
import org.bukkit.entity.Entity;
import org.bukkit.entity.EntityType;
import org.bukkit.entity.LivingEntity;
import org.bukkit.inventory.ItemStack;
import org.bukkit.inventory.meta.ItemMeta;
import org.bukkit.util.EulerAngle;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
/**
 * Drives the three in-world "screens" of the light show.  Each screen is an
 * invisible, invulnerable ArmorStand holding a CARROT_ON_A_STICK whose
 * custom-model-data value (1..9) selects the texture shown by the resource
 * pack: 1-3 = fade variants, 4-6 = on, 7-9 = off (left/center/right).
 */
public class ShowScreens {
    private final BeatMapVisualiser bmv;
    // Single-thread scheduler used for the delayed "fade then off" effect.
    // NOTE(review): it is never shut down, not even in Dismantle() — confirm
    // plugin-disable behavior.
    private final ScheduledExecutorService sch;
    private Entity scrn_l;
    private Entity scrn_c; // store a entity reference as 1.17 broke UUID references (Bukkit.getEntity(UUID) doesnt work async anymore!!!!)
    private Entity scrn_r;
    private ItemStack scrn_stack;  // shared stack whose meta is mutated before each hand-off
    private ItemMeta scrn_meta;
    private boolean is_on;         // last commanded visual state
    public ShowScreens(BeatMapVisualiser bmv)
    {
        this.bmv = bmv;
        sch = Executors.newScheduledThreadPool(1);
    }
    /**
     * Spawns the three armor stands around {@code loc} (left/right offset by
     * +-10.5 on Z) and equips each with the screen item.
     */
    @SuppressWarnings("CommentedOutCode")
    public void Build(Location loc) {
        Location loc_l = loc.clone().add(0, 0.0, +10.5);
        Location loc_c = loc.clone();
        Location loc_r = loc.clone().add(0, 0.0, -10.5);
        /*
        bmv.getLogger().info(String.format("Screen loc_l: %s", loc_l.toString()));
        bmv.getLogger().info(String.format("Screen loc_c: %s", loc_c.toString()));
        bmv.getLogger().info(String.format("Screen loc_r: %s", loc_r.toString()));
        */
        scrn_l = bmv.main_world.spawnEntity(loc_l, EntityType.ARMOR_STAND);
        scrn_c = bmv.main_world.spawnEntity(loc_c, EntityType.ARMOR_STAND);
        scrn_r = bmv.main_world.spawnEntity(loc_r, EntityType.ARMOR_STAND);
        ((ArmorStand)scrn_l).setRightArmPose(new EulerAngle(0,0,0)); // necessary so screens arent "facing towards the sky" in a weird way
        ((ArmorStand)scrn_c).setRightArmPose(new EulerAngle(0,0,0));
        ((ArmorStand)scrn_r).setRightArmPose(new EulerAngle(0,0,0));
        ((ArmorStand) scrn_l).setInvisible(true);
        scrn_l.setInvulnerable(true);
        scrn_l.setGravity(false);
        scrn_l.setRotation(-90,-90);
        ((ArmorStand) scrn_c).setInvisible(true);
        scrn_c.setInvulnerable(true);
        scrn_c.setGravity(false);
        scrn_c.setRotation(-90,-90);
        ((ArmorStand) scrn_r).setInvisible(true);
        scrn_r.setInvulnerable(true);
        scrn_r.setGravity(false);
        scrn_r.setRotation(-90,-90);
        scrn_stack = new ItemStack(Material.CARROT_ON_A_STICK);
        scrn_meta = scrn_stack.getItemMeta();
        set_item(scrn_l, scrn_stack);
        set_item(scrn_c, scrn_stack);
        set_item(scrn_r, scrn_stack);
        // bmv.getLogger().info("Screens built:");
    }
    /** Removes the three armor stands and drops the references. */
    public void Dismantle() {
        // bmv.getLogger().info("Screens dismantling");
        scrn_l.remove(); scrn_l = null;
        scrn_c.remove(); scrn_c = null;
        scrn_r.remove(); scrn_r = null;
    }
    /** Switches all screens to their "on" textures (model data 4/5/6). */
    public void On() {
        // bmv.getLogger().info("Screens ON");
        scrn_meta.setCustomModelData(4);
        scrn_stack.setItemMeta(scrn_meta);
        set_item(scrn_l, scrn_stack);
        scrn_meta.setCustomModelData(5);
        scrn_stack.setItemMeta(scrn_meta);
        set_item(scrn_c, scrn_stack);
        scrn_meta.setCustomModelData(6);
        scrn_stack.setItemMeta(scrn_meta);
        set_item(scrn_r, scrn_stack);
        is_on = true;
    }
    /** Switches all screens to their "off" textures (model data 7/8/9). */
    public void Off() {
        // bmv.getLogger().info("Screens OFF");
        scrn_meta.setCustomModelData(7);
        scrn_stack.setItemMeta(scrn_meta);
        set_item(scrn_l, scrn_stack);
        scrn_meta.setCustomModelData(8);
        scrn_stack.setItemMeta(scrn_meta);
        set_item(scrn_c, scrn_stack);
        scrn_meta.setCustomModelData(9);
        scrn_stack.setItemMeta(scrn_meta);
        set_item(scrn_r, scrn_stack);
        is_on = false;
    }
    /** Switches all screens to their "fade" textures (model data 1/2/3). */
    public void Fade() {
        // bmv.getLogger().info("Screens FADE");
        scrn_meta.setCustomModelData(1);
        scrn_stack.setItemMeta(scrn_meta);
        set_item(scrn_l, scrn_stack);
        scrn_meta.setCustomModelData(2);
        scrn_stack.setItemMeta(scrn_meta);
        set_item(scrn_c, scrn_stack);
        scrn_meta.setCustomModelData(3);
        scrn_stack.setItemMeta(scrn_meta);
        set_item(scrn_r, scrn_stack);
        is_on = true;
    }
    /** Shows the fade texture now, then schedules Off() after {@code delay} ms. */
    public void FadeOff(long delay) {
        Fade();
        sch.schedule(new Scrn_set_off(), delay, TimeUnit.MILLISECONDS);
    }
    public void Run() {
        // no-op for now
    }
    public void Stop() {
        // no-op for now
    }
    /**
     * Beat-synced pulse: if the screens are on, re-applies the "on" textures;
     * otherwise fades them in and schedules an off after 2 s.
     */
    public void Flash() {
        if(is_on) {
            On();
        } else {
            FadeOff(2000);
        }
        // no-op for now
    }
    /** Scheduled task that turns the screens off (used by FadeOff). */
    private class Scrn_set_off implements Runnable {
        @Override
        public void run()
        {
            //bmv.getLogger().info("Screens delayed OFF");
            Off();
            is_on = false;
        }
    }
    /** Puts the given stack in the entity's main hand. */
    private void set_item(Entity ent, ItemStack stack) {
        ((LivingEntity) ent).getEquipment().setItemInMainHand(stack);
    }
}
// EOF
|
Fatigue started to set in for Hays High senior Kate Mondragon as she labored to carry a 180-pound dummy past a set of orange cones.
Decked out in firefighter bunker gear, helmet and oxygen tank, Mondragon paused several times wanting to rest. But all around her, fellow students and instructors encouraged Mondragon to finish, even as her body didn’t want to.
Armed with an extra bit of adrenaline, Mondragon finished dragging the hefty, lifeless body across the way, which allowed her to move onto the next workout station. Such is routine for Mondragon and the class of 12 students who make up Hays CISD’s Firefighter Academy, which is part of the district’s Career and Technology Education (CTE) program.
Freddy Roland, instructor of HCISD’s Firefighter Academy and a current Kyle Fire Department firefighter, said the two-year program allows students to earn certifications in civil service careers. Some of the certifications include the Texas Commission on Fire Protection (TCFP) certification and Emergency Medical Technician. From there, students can potentially find jobs straight out of high school.
“We want to offer something that is tangible, that is there and, if they are interested, they can turn it into a career,” Roland said.
Origins of the program began in 2017 when Suzi Mitchell, HCISD CTE coordinator, met with Kyle Fire Department Chief Kyle Taylor on the possibility of starting a program. Roland, Taylor and Mitchell then observed a similar firefighter academy offered at Austin LBJ, which helped them “straighten out” their ideas, Roland said.
HCISD’s fire academy program incorporates a wide range of certifications that “are going to help them out, no matter if they want to stay on as a firefighter or EMS,” Roland said.
Those certifications extend to jobs in the armed forces or in law enforcement, along with emergency management.
Roland said Austin Community College is grandfathering the program and is also offering it as dual credit. Students who participate pay $250 per semester to be part of the program. Roland said the cost is a “deal,” as trying to obtain similar fire and EMT certification privately can cost close to $6,000.
“Educating kids at a low cost and giving them an opportunity to run equal with those who pay $6,000 to get certifications is a deal,” Roland said.
Keeping the program dual credit is also important as students can obtain as many as 12 to 15 college hours, which they can keep, even if they don’t go into any civil service job.
The course, which encompasses four semesters, offers both traditional and hands-on learning. Roland said the course is “full-blown experience” where students are exposed to the rigors of the civil service industries. Developing future leaders in civil service is the key, Roland said, which is done through the many workouts students in the fire academy accomplish. The experiences are similar to what trainees at fire stations go through.
During the first year, Roland said many students didn’t fully realize the gravity of their course, which has real world and real-life implications.
But for many of the students, getting a chance at a tangible, real-world experience such as the fire academy has been beneficial.
Hays High senior Dallin Roberts said he entered the fire academy after he stopped playing football due to a concussion. Roberts said the academy has since become his primary program.
Lehman High senior Jaime Morales said entering the program was “nerve-wracking” at first, but was also exciting at the same time. Morales said it was an “honor” to be a part of the program.
Mondragon, a Hays High junior, said the course doesn’t feel like a “normal class at all.” However, she said going through all of the course work and physical workouts benefits students in the future.
See more photos from Hays CISD’s fire academy here. |
/*
* This widget needs improvements, it works well but is a bit hacky.
*/
public class StatsTable extends View {
private static final String[] COLUMN_NAMES = {"Stat", "Base", "EVs", "IVs", "Total", ""};
private static final float[] COLUMN_WEIGHTS = {0.20f, 0.14f, 0.14f, 0.14f, 0.28f, 0.10f};
private static final String[] STAT_NAMES = {"HP", "Attack", "Defense", "Sp. Atk.", "Sp. Def.", "Speed"};
private static final int BASE = 0;
private static final int EVS = 1;
private static final int IVS = 2;
private static final int TOTAL = 3;
private static final int MAX_EV_SUM = 510;
private float mTextSize;
private int mMargin;
private int mBarThickness;
private int mWidth;
private Rect mRect;
private Paint mPaint;
private Point mMeasurePoint;
private int mDefaultTextColor;
private int mTextColor;
private OnRowClickListener mRowClickListener;
/* HP Atk Def Spa Spd Spe
* Base
* EVs
* IVs
* Total
*/
private int[][] mStatData = new int[4][6];
private int mLevel = 100;
private Nature mNature = Nature.getDEFAULT();
private StringBuilder mStringBuilder = new StringBuilder();
    /** Convenience constructor for code-created views. */
    public StatsTable(Context context) {
        this(context, null);
    }
    /** Convenience constructor for XML inflation. */
    public StatsTable(Context context, @Nullable AttributeSet attrs) {
        this(context, attrs, 0);
    }
    // Main constructor: sets up dimensions (16sp text, 22dp row margin,
    // 14dp bar thickness), the shared Paint (with a dash effect so the
    // column-connector lines drawn with it are dotted), and resolves the
    // theme's text color and selectable-ripple background.
    // @SuppressLint("ResourceType"): obtainStyledAttributes is called with a
    // hand-built attr array rather than a declare-styleable — presumably
    // intentional; verify the attr ids are sorted as the API requires.
    @SuppressLint("ResourceType")
    public StatsTable(Context context, @Nullable AttributeSet attrs, int defStyleAttr) {
        super(context, attrs, defStyleAttr);
        setClickable(true);
        mTextSize = TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_SP, 16, getResources().getDisplayMetrics());
        mMargin = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, 22, getResources().getDisplayMetrics());
        mBarThickness = (int) TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP, 14, getResources().getDisplayMetrics());
        mPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
        mPaint.setTextSize(mTextSize);
        mPaint.setPathEffect(new DashPathEffect(new float[] {3, 3}, 0));
        mMeasurePoint = new Point();
        mRect = new Rect();
        int[] attrIds = {R.attr.colorOnBackground, R.attr.selectableItemBackground};
        TypedArray typedArray = context.obtainStyledAttributes(attrIds);
        mDefaultTextColor = typedArray.getColor(0, Color.RED);
        setBackground(typedArray.getDrawable(1));
        typedArray.recycle();
    }
    /** Sets the level used by the stat formulas and recomputes the Total row. */
    public void setLevel(int level) {
        mLevel = level;
        invalidateData();
    }
    /** Sets the nature (stat modifiers) and recomputes the Total row. */
    public void setNature(Nature nature) {
        mNature = nature;
        invalidateData();
    }
    /** Replaces the base-stat row and recomputes the Total row. */
    public void setBaseStats(Stats baseStats) {
        mStatData[BASE] = baseStats.getArray();
        invalidateData();
    }
    /** Replaces the EV row and recomputes the Total row. */
    public void setEVs(Stats evs) {
        mStatData[EVS] = evs.getArray();
        invalidateData();
    }
    /** Replaces the IV row and recomputes the Total row. */
    public void setIVs(Stats ivs) {
        mStatData[IVS] = ivs.getArray();
        invalidateData();
    }
    // Recomputes the TOTAL row from base/IV/EV/level/nature, then redraws.
    private void invalidateData() {
        for (int i = 0; i < 6; i++) {
            if (mStatData[BASE][i] == 0)
                // No base stat loaded yet -> leave the total unset.
                mStatData[TOTAL][i] = 0;
            else if (i == 0)
                // Index 0 is HP, which uses its own formula (no nature modifier).
                mStatData[TOTAL][i] = Stats.calculateHp(mStatData[BASE][i], mStatData[IVS][i],
                        mStatData[EVS][i], mLevel);
            else
                mStatData[TOTAL][i] = Stats.calculateStat(mStatData[BASE][i], mStatData[IVS][i],
                        mStatData[EVS][i], mLevel, mNature.getStatModifier(i));
        }
        invalidate();
    }
    /** Zeroes all four data rows (base/EV/IV/total) and redraws. */
    public void clear() {
        for (int i = 0; i < 4; i++) {
            for (int j = 0; j < 6; j++) {
                mStatData[i][j] = 0;
            }
        }
        invalidate();
    }
    /** Registers the listener notified when a stat row is tapped. */
    public void setRowClickListener(OnRowClickListener rowClickListener) {
        mRowClickListener = rowClickListener;
    }
    // Grays out the text when disabled; the actual color is applied in onDraw.
    @Override
    public void setEnabled(boolean enabled) {
        super.setEnabled(enabled);
        mTextColor = enabled ? mDefaultTextColor : Color.GRAY;
        invalidate();
    }
    // Height = padding + header + six stat rows + optional EV-overflow warning
    // line; width is taken as-given from the parent.
    @Override
    protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
        int wSize = MeasureSpec.getSize(widthMeasureSpec);
        mMeasurePoint.set(wSize, getPaddingTop());
        mMeasurePoint.y += measureHeader();
        for (int i = 0; i < STAT_NAMES.length; i++) {
            mMeasurePoint.y += measureRow(i);
        }
        mMeasurePoint.y += measureLastLine();
        mMeasurePoint.y += getPaddingBottom();
        setMeasuredDimension(mMeasurePoint.x, mMeasurePoint.y);
    }
    // Measures the bold header row: text bounds of all column titles concatenated.
    private int measureHeader() {
        mPaint.setFakeBoldText(true);
        mPaint.setColor(mTextColor);
        mStringBuilder.setLength(0);
        for (String name : COLUMN_NAMES) mStringBuilder.append(name);
        mPaint.getTextBounds(mStringBuilder.toString(), 0, mStringBuilder.length(), mRect);
        return mRect.height();
    }
    // Measures one stat row (name + the four numeric cells) plus the row margin.
    private int measureRow(int index) {
        mPaint.setFakeBoldText(false);
        mStringBuilder.setLength(0);
        mStringBuilder.append(STAT_NAMES[index]).append(mStatData[0][index]).append(mStatData[1][index])
                .append(mStatData[2][index]).append(mStatData[3][index]);
        mPaint.getTextBounds(mStringBuilder.toString(), 0, mStringBuilder.length(), mRect);
        return mRect.height() + mMargin;
    }
    // Extra height for the warning line, shown only when the EV sum exceeds 510.
    // NOTE(review): the "Too much evs: " string presumably must match the one
    // drawn by the (out-of-view) drawLastLine — keep them in sync if reworded.
    private int measureLastLine() {
        int sum = 0;
        for (int ev : mStatData[EVS]) sum += ev;
        if (sum <= MAX_EV_SUM) return 0;
        String text = "Too much evs: " + (sum - MAX_EV_SUM);
        mPaint.getTextBounds(text, 0, text.length(), mRect);
        return mRect.height() + mMargin;
    }
    // Caches the laid-out width for use by the draw pass.
    @Override
    protected void onSizeChanged(int w, int h, int oldw, int oldh) {
        super.onSizeChanged(w, h, oldw, oldh);
        mWidth = w;
    }
    // Draw pass mirrors onMeasure: header, six stat rows, then the optional
    // EV-overflow line.  mMeasurePoint.y tracks the running vertical offset.
    @Override
    protected void onDraw(Canvas canvas) {
        super.onDraw(canvas);
        mMeasurePoint.set(0, 0);
        mMeasurePoint.y = getPaddingTop();
        drawHeaderRow(canvas, mMeasurePoint);
        for (int i = 0; i < 6; i++)
            drawStatRow(canvas, mMeasurePoint, STAT_NAMES[i], mStatData[0][i], mStatData[1][i],
                    mStatData[2][i], mStatData[3][i]);
        drawLastLine(canvas, mMeasurePoint);
    }
    // Draws the bold column titles, each positioned by the cumulative column weights.
    private void drawHeaderRow(Canvas canvas, Point measurePoint) {
        int width = mWidth - getPaddingStart() - getPaddingEnd();
        mPaint.setFakeBoldText(true);
        mPaint.setColor(mTextColor);
        mStringBuilder.setLength(0);
        for (String name : COLUMN_NAMES) mStringBuilder.append(name);
        mPaint.getTextBounds(mStringBuilder.toString(), 0, mStringBuilder.length(), mRect);
        int baseLine = measurePoint.y - mRect.top;
        int lineHeight = mRect.height();
        // NOTE(review): this guard is always true for a positive lineHeight;
        // it looks like a leftover from a clipping check — confirm intent.
        if (measurePoint.y + lineHeight > measurePoint.y)
            measurePoint.y += lineHeight;
        for (int i = 0; i < COLUMN_NAMES.length; i++) {
            int x = getPaddingStart();
            for (int j = 0; j < i; j++) x += (int) (width * COLUMN_WEIGHTS[j]);
            canvas.drawText(COLUMN_NAMES[i], x, baseLine, mPaint);
        }
        mPaint.setFakeBoldText(false);
    }
/**
 * Draws a single stat row: the gray stat name, the base/EV/IV values in
 * the text color, gray rule lines filling the gaps between cells, a
 * colored bar proportional to the total, and the bold total right-aligned.
 * Advances measurePoint.y past the row (row height plus top margin).
 */
private void drawStatRow(Canvas canvas, Point measurePoint, String statName, int base, int evs, int ivs, int total) {
    int width = mWidth - getPaddingStart() - getPaddingEnd();
    // The concatenated cell texts are only used to measure the row height.
    String willDraw = statName + base + evs + ivs + total;
    mPaint.getTextBounds(willDraw, 0, willDraw.length(), mRect);
    int rowY = measurePoint.y;
    rowY += mMargin; // Add top margin
    // drawText's y parameter is the baseline; -mRect.top puts the glyphs
    // just below the row's top edge.
    int baseLine = rowY - mRect.top;
    int lineHeight = mRect.height();
    // NOTE(review): this guard is always true for a positive height —
    // presumably an overflow guard; confirm the intent.
    if (measurePoint.y + lineHeight + mMargin > measurePoint.y)
        measurePoint.y += lineHeight + mMargin;
    int x = getPaddingStart();
    mPaint.setColor(Color.GRAY);
    canvas.drawText(statName, x, baseLine, mPaint);
    // lx tracks the right edge of what was just drawn; a gray rule runs
    // from there to the start of the next column (x advances by weight).
    int lx = x + (int) mPaint.measureText(statName);
    x += (int) (COLUMN_WEIGHTS[0] * width);
    mPaint.setColor(Color.GRAY);
    canvas.drawLine(lx, baseLine, x, baseLine, mPaint);
    // Base value cell.
    String text = Integer.toString(base);
    mPaint.setColor(mTextColor);
    canvas.drawText(text, x, baseLine, mPaint);
    lx = x + (int) mPaint.measureText(text);
    x += (int) (COLUMN_WEIGHTS[1] * width);
    mPaint.setColor(Color.GRAY);
    canvas.drawLine(lx, baseLine, x, baseLine, mPaint);
    // EV value cell.
    text = Integer.toString(evs);
    mPaint.setColor(mTextColor);
    canvas.drawText(text, x, baseLine, mPaint);
    lx = x + (int) mPaint.measureText(text);
    x += (int) (COLUMN_WEIGHTS[2] * width);
    mPaint.setColor(Color.GRAY);
    canvas.drawLine(lx, baseLine, x, baseLine, mPaint);
    // IV value cell.
    text = Integer.toString(ivs);
    mPaint.setColor(mTextColor);
    canvas.drawText(text, x, baseLine, mPaint);
    lx = x + (int) mPaint.measureText(text);
    x += (int) (COLUMN_WEIGHTS[3] * width);
    mPaint.setColor(Color.GRAY);
    canvas.drawLine(lx, baseLine, x, baseLine, mPaint);
    // Bar proportional to the total; 504 and 704 (HP) are presumably the
    // maximum attainable totals for the respective stat — TODO confirm.
    int maxWidth = (int) (COLUMN_WEIGHTS[4] * width) - mMargin;
    int barWidth = (int) (Math.min((total / 504f), 1f) * maxWidth);
    if (statName.equalsIgnoreCase("hp")) barWidth = (int) (Math.min((total / 704f), 1f) * maxWidth);
    mPaint.setColor(getStatColor(total));
    canvas.drawRect(x, baseLine - mBarThickness, x + barWidth, baseLine, mPaint);
    lx = x + barWidth;
    // Bold total, right-aligned at the row's right edge.
    text = Integer.toString(total);
    mPaint.setColor(mTextColor);
    mPaint.setFakeBoldText(true);
    float dx = mPaint.measureText(text);
    canvas.drawText(text, width - dx, baseLine, mPaint);
    mPaint.setFakeBoldText(false);
    mPaint.setColor(Color.GRAY);
    canvas.drawLine(lx, baseLine, width - dx, baseLine, mPaint);
}
/**
 * Maps a total stat value onto a pastel color via HSL (hue grows with the
 * stat, clamped at 360; fixed saturation 40 and lightness 75).
 */
private int getStatColor(int s) {
    int hue = (int) (s * 180f / 714f);
    hue = Math.min(hue, 360);
    float[] rgb = Utils.hslToRgb(hue, 40, 75);
    return Utils.rgb(rgb[0], rgb[1], rgb[2]);
}
/**
 * Draws a red, right-aligned warning line when the summed EVs exceed
 * MAX_EV_SUM; draws nothing otherwise.
 */
private void drawLastLine(Canvas canvas, Point measurePoint) {
    int sum = 0;
    for (int ev : mStatData[EVS]) sum += ev;
    if (sum <= MAX_EV_SUM) return;
    String text = "Too much evs: " + (sum - MAX_EV_SUM);
    mPaint.setColor(Colors.RED);
    mPaint.setFakeBoldText(false);
    mPaint.getTextBounds(text, 0, text.length(), mRect);
    // BUG FIX: the x and y arguments were passed to drawText in the wrong
    // order, drawing the warning far off-screen. Canvas.drawText takes
    // (text, x, y, paint).
    canvas.drawText(text, mWidth - mRect.width() - mRect.right, measurePoint.y - mRect.top, mPaint);
}
// Pressed-state highlight bounds applied to the background drawable
// (empty rect = no row highlighted).
private Rect mTouchedBounds = new Rect();
// Index of the stat row under the last ACTION_DOWN, or -1 when none.
private int mTouchedRowIndex = -1;
/**
 * Highlights the touched stat row by sizing the background drawable's
 * bounds to that row, and fires the row-click listener on ACTION_UP when
 * the release lands inside the highlighted bounds.
 */
@SuppressLint("ClickableViewAccessibility")
@Override
public boolean onTouchEvent(MotionEvent event) {
    if (event.getAction() == MotionEvent.ACTION_DOWN) {
        // NOTE(review): getBackground() can return null when the view has
        // no background set — confirm one is always supplied, else this NPEs.
        Drawable bg = getBackground();
        int touchY = (int) event.getY();
        // y walks down the table row by row until the touch fits inside one.
        int y = measureHeader() + getPaddingTop();
        if (touchY < y) {
            // Touch landed on the header: clear any highlight.
            mTouchedBounds.set(0, 0, 0, 0);
            bg.setBounds(mTouchedBounds);
            mTouchedRowIndex = -1;
            return super.onTouchEvent(event);
        }
        for (int i = 0; i < STAT_NAMES.length; i++) {
            int rowHeight = measureRow(i);
            if (touchY < y + rowHeight + mMargin / 4) {
                mTouchedBounds.set(0, y + mMargin / 4, getWidth(), y + rowHeight + mMargin / 4);
                bg.setBounds(mTouchedBounds);
                mTouchedRowIndex = i;
                return super.onTouchEvent(event);
            }
            y += rowHeight;
        }
        // Off table
        mTouchedBounds.set(0, 0, 0, 0);
        bg.setBounds(mTouchedBounds);
        mTouchedRowIndex = -1;
    } else if (event.getAction() == MotionEvent.ACTION_UP) {
        // An empty rect never contains a point, so a stale -1 index cannot
        // reach the listener call below.
        if (mTouchedBounds.contains((int) event.getX(), (int) event.getY()))
            if (mRowClickListener != null)
                mRowClickListener.onRowClicked(this, STAT_NAMES[mTouchedRowIndex], mTouchedRowIndex);
    }
    return super.onTouchEvent(event);
}
/** Callback fired when the user taps a stat row of this table. */
public interface OnRowClickListener {
    // rowName is the tapped stat's caption; rowIndex its position in STAT_NAMES.
    public void onRowClicked(StatsTable statsTable, String rowName, int rowIndex);
}
//TODO total evs
} |
/*
Bacula(R) - The Network Backup Solution
Copyright (C) 2000-2016 <NAME>
The original author of Bacula is <NAME>, with contributions
from many others, a complete list can be found in the file AUTHORS.
You may use this file and others of this release according to the
license defined in the LICENSE file, which includes the Affero General
Public License, v3.0 ("AGPLv3") and some additional permissions and
terms pursuant to its AGPLv3 Section 7.
This notice must be preserved when any source code is
conveyed and/or propagated.
Bacula(R) is a registered trademark of Kern Sibbald.
*/
/*
* JobPlots Class
*
* <NAME>, March 2007
*
*/
#include "bat.h"
#if QT_VERSION >= 0x050000
#include <QtWidgets>
#else
#include <QtGui>
#endif
#include <vector>
#include "util/comboutil.h"
#include "jobgraphs/jobplot.h"
/* Default pass object: flagged unused until a caller fills it in. */
JobPlotPass::JobPlotPass() : use(false)
{
}
/*
 * Field-wise assignment of the filter/limit state handed over from a job
 * list page.  Returns *this for chaining.
 */
JobPlotPass& JobPlotPass::operator=(const JobPlotPass &cp)
{
   /* whether the pass values below are valid and should be applied */
   use = cp.use;
   /* record/days limit check-box states and spin values */
   recordLimitCheck = cp.recordLimitCheck;
   daysLimitCheck = cp.daysLimitCheck;
   recordLimitSpin = cp.recordLimitSpin;
   daysLimitSpin = cp.daysLimitSpin;
   /* combo box selections */
   jobCombo = cp.jobCombo;
   clientCombo = cp.clientCombo;
   volumeCombo = cp.volumeCombo;
   fileSetCombo = cp.fileSetCombo;
   purgedCombo = cp.purgedCombo;
   levelCombo = cp.levelCombo;
   statusCombo = cp.statusCombo;
   return *this;
}
/*
 * Constructor for the controls class which inherits QScrollArea and a ui header
 */
JobPlotControls::JobPlotControls()
{
   /* instantiate the designer-generated child widgets onto this widget */
   setupUi(this);
}
/*
 * Constructor, this class does not inherit anything but pages.
 * passVals may carry predetermined filter values from a job list page;
 * when passVals.use is set this page raises itself to the front.
 */
JobPlot::JobPlot(QTreeWidgetItem *parentTreeWidgetItem, JobPlotPass &passVals)
   : Pages()
{
   /* build the widgets first; pgInitialize registers the page afterwards */
   setupUserInterface();
   pgInitialize(tr("JobPlot"), parentTreeWidgetItem);
   readSplitterSettings();
   QTreeWidgetItem* thisitem = mainWin->getFromHash(this);
   thisitem->setIcon(0,QIcon(QString::fromUtf8(":images/applications-graphics.png")));
   /* curves are only built on first activation, see currentStackItem() */
   m_drawn = false;
   /* this invokes the pass values = operator function */
   m_pass = passVals;
   dockPage();
   /* If the values of the controls are predetermined (from joblist), then set
    * this class as current window at the front of the stack */
   if (m_pass.use)
      setCurrent();
   m_jobPlot->replot();
}
/*
 * Destructor: persist the user settings (only if the page was actually
 * drawn) and free the cached plot records.
 */
JobPlot::~JobPlot()
{
   if (m_drawn)
      writeSettings();
   /* FIX: m_pjd holds heap-allocated PlotJobData*; clear() alone leaked
    * every record.  Delete them before clearing the list. */
   qDeleteAll(m_pjd);
   m_pjd.clear();
}
/*
 * Invoked when the page selector activates this page; builds the controls
 * and the plot lazily, on the first activation only.
 */
void JobPlot::currentStackItem()
{
   if (m_drawn) {
      return;                 /* already built */
   }
   setupControls();
   reGraph();
   m_drawn = true;
}
/*
* Slot for the refresh push button, also called from constructor.
*/
void JobPlot::reGraph()
{
/* clear m_pjd */
m_pjd.clear();
runQuery();
m_jobPlot->clear();
addCurve();
m_jobPlot->replot();
}
/*
 * Setup the control widgets for the graph, this are the objects from
 * JobPlotControls: populate the combos, restore the saved selections, and
 * wire the widget signals to the re-plot slots.
 */
void JobPlot::setupControls()
{
   /* "Fitted" is understood by setPlotType() but intentionally not offered */
   QStringList graphType = QStringList() << /* tr("Fitted") <<*/ tr("Sticks")
      << tr("Lines") << tr("Steps") << tr("None");
   controls->plotTypeCombo->addItems(graphType);
   fillSymbolCombo(controls->fileSymbolTypeCombo);
   fillSymbolCombo(controls->byteSymbolTypeCombo);
   /* saved selections are restored before the connect() calls below, so
    * restoring does not fire the change slots */
   readControlSettings();
   controls->fileCheck->setCheckState(Qt::Checked);
   controls->byteCheck->setCheckState(Qt::Checked);
   connect(controls->plotTypeCombo, SIGNAL(currentIndexChanged(QString)), this, SLOT(setPlotType(QString)));
   connect(controls->fileSymbolTypeCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(setFileSymbolType(int)));
   connect(controls->byteSymbolTypeCombo, SIGNAL(currentIndexChanged(int)), this, SLOT(setByteSymbolType(int)));
   connect(controls->fileCheck, SIGNAL(stateChanged(int)), this, SLOT(fileCheckChanged(int)));
   connect(controls->byteCheck, SIGNAL(stateChanged(int)), this, SLOT(byteCheckChanged(int)));
   connect(controls->refreshButton, SIGNAL(pressed()), this, SLOT(reGraph()));
   /* populate the filter combos; "Any" (no filtering) is always first */
   controls->clientComboBox->addItem(tr("Any"));
   controls->clientComboBox->addItems(m_console->client_list);
   QStringList volumeList;
   getVolumeList(volumeList);
   controls->volumeComboBox->addItem(tr("Any"));
   controls->volumeComboBox->addItems(volumeList);
   controls->jobComboBox->addItem(tr("Any"));
   controls->jobComboBox->addItems(m_console->job_list);
   levelComboFill(controls->levelComboBox);
   boolComboFill(controls->purgedComboBox);
   controls->fileSetComboBox->addItem(tr("Any"));
   controls->fileSetComboBox->addItems(m_console->fileset_list);
   QStringList statusLongList;
   getStatusList(statusLongList);
   controls->statusComboBox->addItem(tr("Any"));
   controls->statusComboBox->addItems(statusLongList);
   if (m_pass.use) {
      /* apply the filter values handed over from the job list page */
      controls->limitCheckBox->setCheckState(m_pass.recordLimitCheck);
      controls->limitSpinBox->setValue(m_pass.recordLimitSpin);
      controls->daysCheckBox->setCheckState(m_pass.daysLimitCheck);
      controls->daysSpinBox->setValue(m_pass.daysLimitSpin);
      comboSel(controls->jobComboBox, m_pass.jobCombo);
      comboSel(controls->clientComboBox, m_pass.clientCombo);
      comboSel(controls->volumeComboBox, m_pass.volumeCombo);
      comboSel(controls->fileSetComboBox, m_pass.fileSetCombo);
      comboSel(controls->purgedComboBox, m_pass.purgedCombo);
      comboSel(controls->levelComboBox, m_pass.levelCombo);
      comboSel(controls->statusComboBox, m_pass.statusCombo);
   } else {
      /* Set Defaults for check and spin for limits */
      controls->limitCheckBox->setCheckState(mainWin->m_recordLimitCheck ? Qt::Checked : Qt::Unchecked);
      controls->limitSpinBox->setValue(mainWin->m_recordLimitVal);
      controls->daysCheckBox->setCheckState(mainWin->m_daysLimitCheck ? Qt::Checked : Qt::Unchecked);
      controls->daysSpinBox->setValue(mainWin->m_daysLimitVal);
   }
}
/*
 * Build and run the catalog query selecting start time, file count, byte
 * count and JobId for every job matching the filter widgets, then cache
 * one PlotJobData record per returned row in m_pjd (oldest first, since
 * the query is descending and records are prepended).
 */
void JobPlot::runQuery()
{
   /* Set up query */
   QString query("");
   query += "SELECT DISTINCT "
      " Job.Starttime AS JobStart,"
      " Job.Jobfiles AS FileCount,"
      " Job.JobBytes AS Bytes,"
      " Job.JobId AS JobId"
      " FROM Job"
      " JOIN Client ON (Client.ClientId=Job.ClientId)"
      " JOIN Status ON (Job.JobStatus=Status.JobStatus)"
      " LEFT OUTER JOIN FileSet ON (FileSet.FileSetId=Job.FileSetId)";
   QStringList conditions;
   comboCond(conditions, controls->jobComboBox, "Job.Name");
   comboCond(conditions, controls->clientComboBox, "Client.Name");
   int volumeIndex = controls->volumeComboBox->currentIndex();
   if ((volumeIndex != -1) && (controls->volumeComboBox->itemText(volumeIndex) != tr("Any"))) {
      /* join the media tables only when filtering on a specific volume.
       * NOTE(review): the volume name is concatenated into the SQL verbatim;
       * a name containing a quote would break the statement. */
      query += " LEFT OUTER JOIN JobMedia ON (JobMedia.JobId=Job.JobId)"
         " LEFT OUTER JOIN Media ON (JobMedia.MediaId=Media.MediaId)";
      conditions.append("Media.VolumeName='" + controls->volumeComboBox->itemText(volumeIndex) + "'");
   }
   comboCond(conditions, controls->fileSetComboBox, "FileSet.FileSet");
   boolComboCond(conditions, controls->purgedComboBox, "Job.PurgedFiles");
   levelComboCond(conditions, controls->levelComboBox, "Job.Level");
   comboCond(conditions, controls->statusComboBox, "Status.JobStatusLong");
   /* If Limit check box For limit by days is checked */
   if (controls->daysCheckBox->checkState() == Qt::Checked) {
      QDateTime stamp = QDateTime::currentDateTime().addDays(-controls->daysSpinBox->value());
      QString since = stamp.toString(Qt::ISODate);
      conditions.append("Job.Starttime>'" + since + "'");
   }
   /* first condition gets WHERE, the rest get AND */
   bool first = true;
   foreach (QString condition, conditions) {
      if (first) {
         query += " WHERE " + condition;
         first = false;
      } else {
         query += " AND " + condition;
      }
   }
   /* Descending */
   query += " ORDER BY Job.Starttime DESC, Job.JobId DESC";
   /* If Limit check box for limit records returned is checked */
   if (controls->limitCheckBox->checkState() == Qt::Checked) {
      QString limit;
      limit.setNum(controls->limitSpinBox->value());
      query += " LIMIT " + limit;
   }
   if (mainWin->m_sqlDebug) {
      Pmsg1(000, "Query cmd : %s\n",query.toUtf8().data());
   }
   QString resultline;
   QStringList results;
   if (m_console->sql_cmd(query, results)) {
      QString field;
      QStringList fieldlist;
      /* Iterate through the records returned from the query */
      foreach (resultline, results) {
         PlotJobData *plotJobData = new PlotJobData();
         fieldlist = resultline.split("\t");
         int column = 0;
         /* Iterate through fields in the record */
         foreach (field, fieldlist) {
            field = field.trimmed(); /* strip leading & trailing spaces */
            if (column == 0) {
               plotJobData->dt = QDateTime::fromString(field, mainWin->m_dtformat);
            } else if (column == 1) {
               plotJobData->files = field.toDouble();
            } else if (column == 2) {
               plotJobData->bytes = field.toDouble();
            }
            column++;
         }
         /* BUG FIX: the prepend used to sit inside the field loop above,
          * inserting the same record once per column (four duplicates per
          * job in the plot data).  Prepend exactly once per record. */
         m_pjd.prepend(plotJobData);
      }
   }
   if ((controls->volumeComboBox->itemText(volumeIndex) != tr("Any")) && (results.count() == 0)){
      /* for context sensitive searches, let the user know if there were no
       * results */
      QMessageBox::warning(this, "Bat",
         tr("The Jobs query returned no results.\n"
            "Press OK to continue?"), QMessageBox::Ok );
   }
}
/*
 * The user interface that used to be in the ui header.  Built by hand
 * because a scroll area was wanted, which is not available in designer:
 * a horizontal splitter holds the Qwt plot and a scrollable controls
 * panel, laid out in a single-cell grid.
 */
void JobPlot::setupUserInterface()
{
   /* FIX: the policy was constructed from magic casted ints and then both
    * directions were overwritten with Ignored anyway (with a duplicated
    * setVerticalStretch(0) call).  Construct it directly with the values
    * actually used. */
   QSizePolicy sizePolicy(QSizePolicy::Ignored, QSizePolicy::Ignored);
   sizePolicy.setHorizontalStretch(0);
   sizePolicy.setVerticalStretch(0);
   m_gridLayout = new QGridLayout(this);
   m_gridLayout->setSpacing(6);
   m_gridLayout->setMargin(9);
   m_gridLayout->setObjectName(QString::fromUtf8("m_gridLayout"));
   m_splitter = new QSplitter(this);
   m_splitter->setObjectName(QString::fromUtf8("m_splitter"));
   m_splitter->setOrientation(Qt::Horizontal);
   m_jobPlot = new QwtPlot(m_splitter);
   m_jobPlot->setObjectName(QString::fromUtf8("m_jobPlot"));
   m_jobPlot->setSizePolicy(sizePolicy);
   m_jobPlot->setMinimumSize(QSize(0, 0));
   QScrollArea *area = new QScrollArea(m_splitter);
   area->setObjectName(QString::fromUtf8("area"));
   controls = new JobPlotControls();
   area->setWidget(controls);
   m_splitter->addWidget(m_jobPlot);
   m_splitter->addWidget(area);
   m_gridLayout->addWidget(m_splitter, 0, 0, 1, 1);
}
/*
* Add the curves to the plot
*/
void JobPlot::addCurve()
{
m_jobPlot->setTitle(tr("Files and Bytes backed up"));
m_jobPlot->insertLegend(new QwtLegend(), QwtPlot::RightLegend);
// Set axis titles
m_jobPlot->enableAxis(QwtPlot::yRight);
m_jobPlot->setAxisTitle(QwtPlot::yRight, tr("<-- Bytes Kb"));
m_jobPlot->setAxisTitle(m_jobPlot->xBottom, tr("date of backup -->"));
m_jobPlot->setAxisTitle(m_jobPlot->yLeft, tr("Number of Files -->"));
m_jobPlot->setAxisScaleDraw(QwtPlot::xBottom, new DateTimeScaleDraw());
// Insert new curves
m_fileCurve = new QwtPlotCurve( tr("Files") );
m_fileCurve->setPen(QPen(Qt::red));
m_fileCurve->setCurveType(m_fileCurve->Yfx);
m_fileCurve->setYAxis(QwtPlot::yLeft);
m_byteCurve = new QwtPlotCurve(tr("Bytes"));
m_byteCurve->setPen(QPen(Qt::blue));
m_byteCurve->setCurveType(m_byteCurve->Yfx);
m_byteCurve->setYAxis(QwtPlot::yRight);
setPlotType(controls->plotTypeCombo->currentText());
setFileSymbolType(controls->fileSymbolTypeCombo->currentIndex());
setByteSymbolType(controls->byteSymbolTypeCombo->currentIndex());
m_fileCurve->attach(m_jobPlot);
m_byteCurve->attach(m_jobPlot);
// attach data
int size = m_pjd.count();
int j = 0;
#if defined(__GNU_C)
double tval[size];
double fval[size];
double bval[size];
#else
double *tval;
double *fval;
double *bval;
tval = (double *)malloc(size * sizeof(double));
fval = (double *)malloc(size * sizeof(double));
bval = (double *)malloc(size * sizeof(double));
#endif
foreach (PlotJobData* plotJobData, m_pjd) {
// printf("%.0f %.0f %s\n", plotJobData->bytes, plotJobData->files,
// plotJobData->dt.toString(mainWin->m_dtformat).toUtf8().data());
fval[j] = plotJobData->files;
bval[j] = plotJobData->bytes / 1024;
tval[j] = plotJobData->dt.toTime_t();
// printf("%i %.0f %.0f %.0f\n", j, tval[j], fval[j], bval[j]);
j++;
}
m_fileCurve->setData(tval,fval,size);
m_byteCurve->setData(tval,bval,size);
for (int year=2000; year<2010; year++) {
for (int month=1; month<=12; month++) {
QString monthBegin;
if (month > 9) {
QTextStream(&monthBegin) << year << "-" << month << "-01 00:00:00";
} else {
QTextStream(&monthBegin) << year << "-0" << month << "-01 00:00:00";
}
QDateTime mdt = QDateTime::fromString(monthBegin, mainWin->m_dtformat);
double monbeg = mdt.toTime_t();
// ...a vertical line at the first of each month
QwtPlotMarker *mX = new QwtPlotMarker();
mX->setLabel(mdt.toString("MMM-d"));
mX->setLabelAlignment(Qt::AlignRight|Qt::AlignTop);
mX->setLineStyle(QwtPlotMarker::VLine);
QPen pen(Qt::darkGray);
pen.setStyle(Qt::DashDotDotLine);
mX->setLinePen(pen);
mX->setXValue(monbeg);
mX->attach(m_jobPlot);
}
}
#if !defined(__GNU_C)
free(tval);
free(fval);
free(bval);
#endif
}
/*
 * Slot: translate the plot-type combo text into a QwtPlotCurve style and
 * apply it to both curves.  "Fitted" and "Lines" both draw lines and
 * additionally enable the Fitted curve attribute; unknown text means no
 * curve is drawn.
 */
void JobPlot::setPlotType(QString currentText)
{
   QwtPlotCurve::CurveStyle style = QwtPlotCurve::NoCurve;
   if (currentText == tr("Fitted") || currentText == tr("Lines")) {
      style = QwtPlotCurve::Lines;
      m_fileCurve->setCurveAttribute(QwtPlotCurve::Fitted);
      m_byteCurve->setCurveAttribute(QwtPlotCurve::Fitted);
   } else if (currentText == tr("Sticks")) {
      style = QwtPlotCurve::Sticks;
   } else if (currentText == tr("Steps")) {
      style = QwtPlotCurve::Steps;
   } else if (currentText == tr("None")) {
      style = QwtPlotCurve::NoCurve;
   }
   m_fileCurve->setStyle(style);
   m_byteCurve->setStyle(style);
   m_jobPlot->replot();
}
/*
 * Populate a symbol-selection combo; each item's data carries the
 * corresponding QwtSymbol::Style value used by setSymbolType().
 *
 * FIX: corrected the user-visible typos "DTrianle" -> "DTriangle" and
 * "Vline" -> "VLine" (captions now match the enum names).  A selection
 * previously saved under the old text simply falls back to the default
 * index in readControlSettings().
 */
void JobPlot::fillSymbolCombo(QComboBox *q)
{
   q->addItem( tr("Ellipse"), (int)QwtSymbol::Ellipse);
   q->addItem( tr("Rect"), (int)QwtSymbol::Rect);
   q->addItem( tr("Diamond"), (int)QwtSymbol::Diamond);
   q->addItem( tr("Triangle"), (int)QwtSymbol::Triangle);
   q->addItem( tr("DTriangle"), (int)QwtSymbol::DTriangle);
   q->addItem( tr("UTriangle"), (int)QwtSymbol::UTriangle);
   q->addItem( tr("LTriangle"), (int)QwtSymbol::LTriangle);
   q->addItem( tr("RTriangle"), (int)QwtSymbol::RTriangle);
   q->addItem( tr("Cross"), (int)QwtSymbol::Cross);
   q->addItem( tr("XCross"), (int)QwtSymbol::XCross);
   q->addItem( tr("HLine"), (int)QwtSymbol::HLine);
   q->addItem( tr("VLine"), (int)QwtSymbol::VLine);
   q->addItem( tr("Star1"), (int)QwtSymbol::Star1);
   q->addItem( tr("Star2"), (int)QwtSymbol::Star2);
   q->addItem( tr("Hexagon"), (int)QwtSymbol::Hexagon);
   q->addItem( tr("None"), (int)QwtSymbol::NoSymbol);
}
/*
 * slot to respond to the symbol type combo changing
 */
void JobPlot::setFileSymbolType(int index)
{
   /* type 0 == the file-count curve */
   setSymbolType(index, 0);
}
/* slot: byte-curve symbol combo changed; type 1 == the byte-count curve */
void JobPlot::setByteSymbolType(int index)
{
   setSymbolType(index, 1);
}
void JobPlot::setSymbolType(int index, int type)
{
QwtSymbol sym;
sym.setPen(QColor(Qt::black));
sym.setSize(7);
QVariant style;
if (0 == type) {
style = controls->fileSymbolTypeCombo->itemData(index);
sym.setStyle( (QwtSymbol::Style)style.toInt() );
sym.setBrush(QColor(Qt::yellow));
m_fileCurve->setSymbol(sym);
} else {
style = controls->byteSymbolTypeCombo->itemData(index);
sym.setStyle( (QwtSymbol::Style)style.toInt() );
sym.setBrush(QColor(Qt::blue));
m_byteCurve->setSymbol(sym);
}
m_jobPlot->replot();
}
/*
 * Slot: show or hide the file-count curve (and its left axis) when the
 * "files" check box changes state.
 */
void JobPlot::fileCheckChanged(int newstate)
{
   bool show = (newstate != Qt::Unchecked);
   if (show) {
      m_fileCurve->attach(m_jobPlot);
   } else {
      m_fileCurve->detach();
   }
   m_jobPlot->enableAxis(QwtPlot::yLeft, show);
   m_jobPlot->replot();
}
/*
 * Slot: show or hide the byte-count curve (and its right axis) when the
 * "bytes" check box changes state.
 */
void JobPlot::byteCheckChanged(int newstate)
{
   bool show = (newstate != Qt::Unchecked);
   if (show) {
      m_byteCurve->attach(m_jobPlot);
   } else {
      m_byteCurve->detach();
   }
   m_jobPlot->enableAxis(QwtPlot::yRight, show);
   m_jobPlot->replot();
}
/*
 * Save user settings associated with this page: the splitter geometry
 * plus the three style combo selections.  Combos are stored by item text
 * and matched again by text in readControlSettings().
 */
void JobPlot::writeSettings()
{
   QSettings settings(m_console->m_dir->name(), "bat");
   settings.beginGroup("JobPlot");
   settings.setValue("m_splitterSizes", m_splitter->saveState());
   settings.setValue("fileSymbolTypeCombo", controls->fileSymbolTypeCombo->currentText());
   settings.setValue("byteSymbolTypeCombo", controls->byteSymbolTypeCombo->currentText());
   settings.setValue("plotTypeCombo", controls->plotTypeCombo->currentText());
   settings.endGroup();
}
/*
* Read settings values for Controls
*/
void JobPlot::readControlSettings()
{
QSettings settings(m_console->m_dir->name(), "bat");
settings.beginGroup("JobPlot");
int fileSymbolTypeIndex = controls->fileSymbolTypeCombo->findText(settings.value("fileSymbolTypeCombo").toString(), Qt::MatchExactly);
if (fileSymbolTypeIndex == -1) fileSymbolTypeIndex = 2;
controls->fileSymbolTypeCombo->setCurrentIndex(fileSymbolTypeIndex);
int byteSymbolTypeIndex = controls->byteSymbolTypeCombo->findText(settings.value("byteSymbolTypeCombo").toString(), Qt::MatchExactly);
if (byteSymbolTypeIndex == -1) byteSymbolTypeIndex = 3;
controls->byteSymbolTypeCombo->setCurrentIndex(byteSymbolTypeIndex);
int plotTypeIndex = controls->plotTypeCombo->findText(settings.value("plotTypeCombo").toString(), Qt::MatchExactly);
if (plotTypeIndex == -1) plotTypeIndex = 2;
controls->plotTypeCombo->setCurrentIndex(plotTypeIndex);
settings.endGroup();
}
/*
* Read and restore user settings associated with this page
*/
void JobPlot::readSplitterSettings()
{
QSettings settings(m_console->m_dir->name(), "bat");
settings.beginGroup("JobPlot");
if (settings.contains("m_splitterSizes")) {
m_splitter->restoreState(settings.value("m_splitterSizes").toByteArray());
}
settings.endGroup();
}
|
Controlling opto-electronic characteristics of ternary IIVI alloyed quantum dots: alcohol processing assay For their ultra-wide color gamut, high efficiency, robustness, and solution processability, Cd-based alloy semiconductor quantum dots (AQDs) continue to proliferate by driving innovations in the fields of optoelectronics, photovoltaics, multiplex bio-imaging, and cancer research. Herein, non-toxic, low-cost isopropyl alcohol vapor-based oxidative treatment protocol is developed and applied to tune the light emission spectrum of crystalline coreshell CdSe1−xSx/ZnS quantum dots. As evidenced by the results of structural investigations, these AQDs when exposed to vapors produced ultrasonically from 10:1 isopropyl alcohol-to-water mix undergo an isotropic, diameter non-specific size reduction at the rate of ∼1.3 min−1. Nonlinear time-dependent spectral shifts, revealed experimentally, are consistent with the results of the effective-mass approximation treatment. The emission yields are seen to undergo an initial drop, yet to plateau as the etch time increases. The study opens a door to a soft, top-down monotonic tailoring of the light emission characteristics and opto-electronic response of stoichiometrically- and hierarchically-complex coreshell constructs in technologically-viable group IIVI nano-semiconductors as well as AQD-based catalytic conversion of organic compounds. Introduction Semiconductor quantum dots (QDs), also known as artificial atoms, are quasi-zero-dimensional physical systems with an ultimate degree of charge carrier and exciton confinement and highly refined, size-tunable optoelectronic response. A recent progress pertaining to the controlled synthesis of colloidal binary core-shell QDs and the development of diverse surface-capping strategies have helped propel technological innovations across many fields including opto-electronics, bio-imaging, cancer research, and quantum communication to new heights. 
In contrast to their elemental and binary counterparts, ternary semiconductor QDs and nanocrystals offer an additional degree of tunability of their light emission spectrum. These alloyed quantum dots (AQDs) possess many desirable characteristics such as increased external electroluminescent yield as well as photovoltaic efficiency that make them sought after for application in QD-based photo-detecting, energy harvesting, and light emitting devices. Group II-VI semiconductor crystals and their alloy counterparts possess a range of key technologicallyrelevant opto-electronic responses and were a subject of detailed investigations before. Among different ternary alloys, nanocrystal quantum dots in CdSeTe and CdSeS were studies most intensively as their band-edge emissions fall in the visible part of the electromagnetic spectrum. While a hot-injection method remains a primary route to producing binary colloidal AQDs, other variants including diffusion based post-growth modification and non-injection one-pot approaches have been additionally developed to synthesize substratefree AQDs. Yet, many challenges lie ahead and are stipulated by the inability to produce mono-disperse, size-and shapespecific AQDs while simultaneously maintaining their core stoichiometry. The problem is in-part associated Any further distribution of this work must maintain attribution to the author(s) and the title of the work, journal citation and DOI. with overall fast, yet dissimilar reaction rates of chalcogenide precursors. In addition, CdSeS spectral emission range can be practically tuned within a narrow window of ∼125 nm. To cover the entire visible range, i.e. ∼400-650 nm, the alloy content has to be tuned in conjunction with the quantum dot physical size. 
Unlike bulk semiconductors, whose dimensions can be altered using both additive and subtractive methods, owing to emission quenching by the surface defects/traps the top-down processing routes have to be ruled out for nano-semiconductors. Moreover, due to Ostwald ripening, synthesis of colloidal QDs with the size in a low nm-range is not feasible. Combined, this necessitates a search for and development of complimentary, yet soft and relatively fast etching protocols to tailor the size and, in turn, the opto-electronic response of stoichiometrically-complex nano-semiconductors including AQDs. On this front, the dissolution of simpler, i.e. CdSe colloidal quantum dots were reported earlier in 3-amino-1-propanol/water mixtures and in organic solvents by controlling precipitation-dissolution dynamics with surface stabilizing ligands. The amine-assisted etching of CdSe remains very slow and time non-monotonic yielding highly anisotropic, pyramidally-shaped CdSe nanocrystals with Cd-terminated facets. Faster etch rate, on the order of ∼0.1 A min −1 were achieved with oleic acid. Yet, the dissolution of CdSe was reported to be strongly size dependent with the size 'defocusing' and spectral broadening reported among the key drawbacks. As another drawback of amine-assisted etching, the temperature of the dissolution-growth reversal is to vary with the diameter which results in dissimilar QD size populations and undesirable color-splitting effects. In this work, we develop and report a facile, i.e. one-step isotropic size-tuning of CdSeS/ZnS core-shell AQDs using an isopropyl alcohol/water based etching protocol. In contrast to other reported studies, the treatment is done in a vapor phase which obviates the need for AQD precipitation and re-suspension in the etch solutions. The vapor-treated AQD specimen are confirmed to retain their shape anisotropy and alloy content while remaining luminescent. 
The emission spectra are further found to exhibit a non-linear dependence on the etch time and the effects are successfully modeled/accounted for using the effective mass approximation framework as discussed below. Processing methods Vapor-based treatment Highly-luminescent, ∼6-8 nm diameter, oleic acid-capped CdSe 0.83 S 0.17 /ZnS core-shell AQDs suspended in toluene (∼1 mg ml −1 ) were obtained commercially (Sigma Aldrich). Their morphology, crystal structure and atomic compositions were obtained and the results are given below. As a first step, a 1 l aliquot of AQDs is drop-cast onto a glass slide. Once toluene evaporated (∼3-4 min) the slide was fitted vertically into a vial containing ∼25 ml of C 3 H 8 O:H 2 O solution (10:1) with AQDs located ∼1 cm above the liquid/air interface. The solution was agitated and, in parallel, heated ultrasonically (Misonix 3000 with horn attachment, ∼9 W power). The QD samples were exposed to the etchant mist/vapors using ∼3 min time intervals for a total time of 15 min. At the end of each cycle, the slide was taken out to assess the spectral characteristics of QDs. The emission spectrum of AQDs treated with ∼10:1 isopropyl alcohol to DI water mixture exhibited a consistent blue-shift with the etch time, figure 1. Yet, treatments that were carried out similarly but only with either pure DI water or (∼99.9%) C 3 H 8 O yielded no discernible change of the light emission characteristics. Longer exposures (>15 min) to ∼99.9% pure C 3 H 8 O resulted in a noticeable reduction of the luminescence intensity. Liquid phase treatment Alternatively, the treatments were carried out in a liquid phase by drop-casting the same alcohol water solution directly onto AQDs whose temperature was maintained at ∼60°C with an external electrical (Joule) heater. While the spectral trends (color shifts) were similar, the alternative pathway is found to be an order of magnitude slower compared to the sonication method. 
We attribute this to an increase in the oxygen levels and net higher local temperatures, the latter are likely to aid oleic acid cap removal thus speeding up the processing. A monotonic time-dependent blue-shift of the emission color of the AQDs is a primary indicator of the AQDs undergoing etching in the presence of the alcohol-water vapors. To verify the mechanism and to identify the chemical pathway, we carried out a set of the structural and optical property characterizations of the post processed AQDs, the results of which are presented and discussed next. Results and analysis We relied on HITACHI H-9000NAR High Resolution Transmission Electron Microscope (HRTEM) to assess the effect of the alcohol etching on the crystal order, morphology, size distribution, and aspect ratio of the AQDs comparatively, i.e. before and after the treatment. Figure 3 shows representative HRTEM images of as-received AQDs and the samples processed for 15 min. The electron diffraction patterns were additionally obtained and the results confirmed that the crystal structure of AQDs was similar to that of wurtzite bulk CdSe (supplementary materials is available online at stacks.iop.org/MRX/7/075008/mmedia), though the former possess slightly reduced lattice constants: a=4.12±0.32 and c=6.72±0.04. HRTEM images of larger ensembles of AQDs were collected to quantify the impact of the etch time on the diameter and aspect ratio distributions. According to figure 2, for the etch time of ∼15 min the major diameter shrank by ∼2.0 nm whereas the diameter aspect ratio changed by −4.6%, i.e. from 1.30 to 1.24, thus confirming that the alcohol etch rate remains isotropic. The light emission characteristics of AQD samples were assessed next by carrying out room temperature cwphotoluminescence (PL) spectroscopic tests. 
The samples were excited with light generated by a 450 W Xenon lamp (excitation wavelength of ∼365 nm; optical power density of ∼5 mW cm −2 ) and the emission spectra were collected with an Olympus 51X microscope and dispersed by another double-grating monochromator onto a photomultiplier tube operating in a photon counting mode and serving as a detector. The instrument resolution was limited to 2 nm and the spectra were acquired in the range of ∼400-680 nm. The emission spectra of the processed AQDs were obtained by Gaussian decomposition/fitting that helped identify and remove background PL that consisted of two static PL bands with peaks centered at ∼500 and 556 nm (supplementary materials) and attributed to the etch reaction by-products. The stand-alone individual narrow-width bands seen in figure 4(a) are excitonic emission bands of the AQDs. These AQD bands exhibit characteristic and very consistent blue shifting and slightly broaden as etch time increases. While initially the integral PL intensity decreases, it quickly stabilizes and remains almost unchanged for the treatment durations of up to 15 min, figure 4(b). The observed initial drop of the quantum yield (QY) is generally expected as even a partial removal of the ZnS shell is to result in re-activation and increase of the ratio of the surface defects that act as non-radiative recombination centers. Assuming a typical shell thickness on the order of a monolayer, i.e. ∼3.1 for optimized core-shell QD configurations and the mean etch rate of ∼0.7 min −1, etch time of ∼4 min should be sufficient to strip ZnS. The removal of the shell is to coincide with the onset of the non-radiative recombinations-a primary cause of the luminescence loss for semiconductor nano-crystals. In our case, the intensity loss is already evident at ∼3 min, figure 4(b) and attributed to a larger etch rate of the defect-prone ZnS shell. 
Furthermore, as the QD diameter reduces, the QY is to drop due to a rise of the surface-to-volume ratio. Surface defects that act as PL quenches do play due to a large surface-to-volume ratio a more pronounced role in small diameter QDs, but only in uncapped ones. To our surprise and contrary to prior literature reports, the PL yields remain high for the treated samples as well. This is most likely due to the fact that etching recipe remains mild. In contrast, the QY remained almost time-independent for etch durations >3 min, figure 4(b), in part due to an increase in the oscillator strength of the excitonic transitions in smaller size 0D systems. The time-color interplay (figure 1) is next analyzed for bare AQDs (etch time >3 min) within the framework of effective mass approximation (EMA). The band gap energy of bulk alloyed CdSe 1−x S x can be obtained based on the following equation, the confinement effects prevail and are to control spectral shifts. In this regime, the photon emission energy of the AQDs is obtained by treating Coulomb e-h interactions perturbatively, with the emission energy given by where the second and third terms stand for the electron and hole confinement and Coulomb electrostatic potential energies, respectively. Approximating the electron and hole effective masses with their bulk values for CdSe: m e =0.13m 0 and m h =0.45m 0, and using r =6 for Cd 1 Se 0.83 S 0.17, we next obtain and plot the emission energy versus radius in figure 5. According to the results presented in figure 2, the size distribution, and, more specifically, diameter broadening is to vary only slightly with etching. Furthermore, as the temperature and, in turn, the net etch rate remain constant throughout the treatments, the radii of all AQDs are expected to shrink linearly with time. 
Given that the confinement energy prevails over the Coulomb energy (the last term in the above equation), both the spectral shift and the excitonic peak broadening are predicted to exhibit highly nonlinear time evolutions, which are readily evident in figure 5, the inset. This further serves as a confirmation that the light emission/ spectrum shifts are primarily controlled by the energy quantization effects in our AQDs with a mean diameter of up to ∼7 nm. It should be noted that the results additionally point to a very negligible, if any, role of the radial alloy content gradient that can be present and induced by unbalanced precursor reactivities within the AQD cores. The two background, i.e. ∼500 and 556 nm PL peaks revealed by the Gaussian spectral decomposition only appear in the processed samples and therefore are attributed to the reaction by-products. The intensity and width of the 500 nm peak remain constant for all treatment durations, whereas the intensity of the 556 nm peak slightly increased with the processing time. The bands are consistent and assigned to a defect-associated luminescence originating from ZnO, CdS and CdO surface defect states, respectively. More specifically, the 500 nm emission involves oxygen vacancies of ZnO by-product. These oxygen vacancies are known to form deep-donor recombination states within the band gap of ZnO. The origin of the 556 nm band is traced to the surface defect states of CdS and CdO. Oxidative degradation and etching of CdSe QDs could be linked to a molecular oxygen. Albeit possible, this pathway is to yield SeO, 2 SO 2 and, more importantly, CdO as a surface capping layer. The latter is however to arrest the etch process, in contrast to what was observed by us experimentally. Since treatments with the water alone were equally ineffective, AQD etching pathways that involve water most likely are thermodynamically unfavorable. Surprisingly, the AQD alcohol-based etching didn't proceed without water either. 
Due to an increased presence of defects, surface of Se-rich CdSeS is to carry a partial positive charge. This is to render the AQD surface hydrophilic and to aid the surface absorption of water and, in turn, alcohol molecules from the mist. According to prior work, a surface-supplied positive charge, i.e. hole, h + is integral to photo-oxidation/ conversion of isopropyl alcohol into acetone which is accompanied by the formation of hydrogen radicals on CdS surface. Owing to a large surface-to-volume ratio and superior light absorption characteristics of spherically-shaped QDs, the latter mechanism can be particularly beneficial to both catalytic conversion of various organic compounds as well as etching of CdS nano-structures. Depicted below are the key surface reactions that are likely, among others, to control the isopropyl alcohol etching of the core-shell AQDs in our case: In the above scheme, e − and h + are either thermally or photo-generated ZnS-and CdS-supplied charges. The role of water is thus limited to assisting with (a) absorption of CH CHOH Cd, 2 and -SO 4 2 etch products. The metal cations are to eventually react with the oxygen and to precipitate as solid ZnO and CdO. It is worth noting that many secondary reactions such as those involving, for instance, S, Se and H while not shown, are not to be ruled out. Summary One-step, highly non-toxic and isotropic isopropyl-water based vapor etching protocol was developed and applied to continuously down-tune the size of highly heterogeneous CdSe 1−x S x /ZnS core-shell QDs. In contrast to many other, prior-developed top-down semiconductor processing routes, the composition and shape anisotropy of AQDs remain unaffected by the etching. The water is argued to play an important but mostly secondary role: a dissolution and removal of the reaction intermediates/by-products which will otherwise impede the etching. 
To restore emission yields, over-growth of ZnS or other similar cladding layer should be considered as a final post-etch step. The protocol can be used for precise tailoring of opto-electronic characteristics of group II-VI core-shell nano-semiconductors as well as AQD-based catalytic conversion of organic compounds. |
Social cognitions and smoking behaviour: Temporal resolution matters. Objectives Health behaviour theories outline how cognitions predict behaviours, but rarely specify the temporal relation between cognitions and behaviours. It is not known whether these predictive relationships vary depending on temporal resolution or whether the relative influence of cognitions varies with measurement schedules. The current exploratory study therefore investigates whether the associations between behavioural cognitions (self-efficacy, intention, and risk perception) and smoking vary when measured momentarily, at day level, or using the more common baseline-follow-up design. Design EMA study involving 36 continuing smokers over 17 days. Participants logged cigarettes and reported their cognitions at baseline, daily (evening), and in response to momentary surveys. Methods Random-effects models were used to compare the effects of cognitions measured at different time points on the number of cigarettes smoked daily and the time interval until the next cigarette smoked. Results Self-efficacy and risk perception measured at baseline significantly predicted cigarettes smoked each day, but this effect became non-significant when daily measurements of cognitions were included in the model. Momentary smoking behaviour was predicted by momentary measurements of risk perception, with no significant effects of social cognitions at baseline. Conclusions Relationships between cognitions and behaviours vary according to the temporal resolution of the measurement schedule. Ensuring that the temporal resolution of assessment is appropriate for the temporal dynamics of the behaviour being assessed is important. Future research is needed to investigate the potential for leveraging specific cognitive processes depending on temporal importance in order to increase health-promoting behaviours. Statement of contribution What is already known on this subject? 
Social cognitions including intentions, risk perception, and self-efficacy have been observed to predict smoking. Little is known about the role of time in the cognition-behaviour relationship. Cognitions have been observed to fluctuate, but instability is typically not considered in research design. What does this study add? Daily measurement of social cognitions predicts behaviour better than measurements taken at baseline. Momentary smoking behaviour is predicted by momentary cognitions at the intra-individual level. Temporal resolution of measurement should be considered when investigating cognition-behaviour relationships. |
When I was studying multimedia design and communication some years ago the term User Experience had only been around for 1–2 years. It was a ‘sprout’ in the industry presented as an eclectic role for digital problem solvers. That’s where the majority of web designers, analysts and developers started to transmute into a very unique craft.
Fast forward to today, you have UX Designers, UX Developers, IA/UX Designers, UX Unicorns, UX<insert whatever hybrid title>. And me? I’m a Senior UX/UI Designer.
People often assume that this job rocks (and it does). They say things like: ‘I wish I could do what you do for money’. If you are one of them and wonder where you should start or what exactly this role is about, this post is my attempt to answer that question for you.
What does it take?
People tend to put UX into a creative specialist box, but the creativity is not just visual design. The truth is that we don’t use crayons to sketch out ideas, we don’t take creative breaks to play table football or play with legos to practice problem-solving. We don’t look for nor wait on inspiration from the Universe, use thousands of post-it notes to gather the ideas.
What we do is to take problems and deliver solutions in creative ways based on research and data. Where feedback streams from different types of users collide, we as UX designers mark the noticeable patterns. We then build, iterate and test each step of solving the initial problem. Importantly, ‘testing’ can mean anything from fool-proofing ideas in your own mind to trying them out with one user or with thousands. Sounds complicated? And it is.
To put it into basics:
Where quantified user needs meet business requirements — that’s where a UX designer makes the magic happen.
The interaction of these two elements is a daily process that the UX specialist has to manage. The UX specialist acts as a translator, and the iterations toward the final product are where the magic happens. Managing this process requires a lot of compromise and even more hustle.
What about skills?
To be a UX designer, you should have a wide range of important skills. The quickest way to figure out if you do have these skills, is to ask yourself — can you do the following without frying your brain out:
Can you shift between the ‘left’ and the ‘right’ side of your brain — the place where the analytical thinker meets the creative problem solver?
Are you an idea machine able to solve problems?
Do you have extensive knowledge of heuristics and best design practices, user psychology, human-computer interaction, usability, interactive patterns, the underlying technology, etc.?
Can you empathise and embody the user psyche into your own at every step of the project?
Do you strive to be eclectic, curious and to never stop learning?
Can you convey your ideas in a clear and easily digestible way?
Are you open to challenges? More importantly, do you take ‘No’ as a challenge?
Having knowledge or experience in one or more of the following areas is a plus: developer, visual designer, marketing specialist, researcher, analyst or in fact any other role involved in digital production.
Can you question every single decision and move you make throughout the day, so that you can answer the what, why and how? This means thousands of questions you have to face hour by hour. Your every solution has to fulfill user needs and cover the business requirements. You must be able to translate the data and research into answers. Finally you must be able to sell your solution to your boss, coworker, client and most importantly — the end user.
Lastly: Are you able to switch off your thinking and the problem-solving mechanism when the work day is over? This is where many specialists who display entrepreneurial values overreach, sacrificing their health, general wellbeing and work performance.
And there’s plenty of perks
Digital production remains an ever expanding industry as businesses focus more and more on their users. This tends to bring a solid demand for people who are able to solve problems and deliver mindful products. What this means for you is an amazing opportunity to explore and push your ideas forward without any doubts or fears. With immense demand the industry has your back.
What you do as a UX designer can make users relate and empathise with a digital or physical product. This then results in higher conversion rates and sales. As in any other field, if you are good at something and deliver an abundance of value you will get rewarded accordingly.
Furthermore, thanks to the eclectic nature of this field you will be able to apply the skills learned to other parts of your life. You will basically be able to solve any problem with your UX lenses on, designing the optimal solution, even if it’s just planning a date or fixing a household item.
If you can handle all of this — you are in. |
class Formatter:
    """
    Class that holds the formatters.

    NOTE(review): both methods look like unfinished stubs (one is a bare
    ``pass``, the other ignores its arguments) -- confirm the intended
    behaviour before relying on this class.
    """
    def split(self, sep: str = '\n'):
        """
        Split the string into lines.

        :param sep: separator to split on; defaults to a newline.
        :return: currently ``None`` -- the body is a stub (``pass``).
        """
        pass
    def replace_str(self, from_str: str, to_str: str) -> "Formatter":
        """
        Replace a string with another string.

        :param from_str: substring to be replaced.
        :param to_str: replacement substring.
        :return: a new, empty ``Formatter``; note the arguments are
            currently ignored (stub implementation).
        """
        return Formatter()
Currently, AFCS (Automated Flight Control Systems) in business class jets operate with two fundamental climb functions, Vertical Speed and Flight Level Change (FLCH).
Vertical Speed Mode is commonly utilized to provide a constant feet per minute rate of climb (vertical speed) based on a manual selection by the pilot. While this is an effective method to climb while the autopilot is engaged, it requires constant input from the pilot to adjust climb rates to ensure safe attitudes (the relation of an aircraft to its surroundings such as air flow and gravity, particularly the orientation of the aircraft with respect to the horizon) as the aircraft climbs through higher altitudes. If the Vertical Speed selection is left unattended, the airplane will attempt to maintain the climb rate through attitude adjustment with no regard to diminishing speed. Subsequently, it is quite possible to achieve a high altitude stall if the pilot is distracted and forgets to adjust the Vertical Speed according to aircraft altitude and performance.
FLCH Mode is frequently taught by training organizations as the primary method to climb via the AFCS because it maintains a constant air speed during the climb. This method virtually eliminates the possibility of stalling the aircraft during the climb through the flight levels. Unfortunately, the FLCH Mode cannot accommodate the constantly changing air data parameters affecting the aircraft speed through the air. Consequently, during a FLCH selected climb, the aircraft tends to oscillate up and down on the pitch axis (the orientation of the aircraft's longitudinal axis, whether the aircraft's nose is pointing up, down, or level with the horizon) in an effort to maintain a constant air speed through constantly changing temperatures at altitude. The result is an extremely erratic climb and a sometimes uncomfortable ride for the crew and passengers.
Consequently, it would be desirable to provide an automatic flight control system vertical control function which does not suffer from the drawbacks of Vertical Speed Mode and FLCH Mode. |
/**
 * Daily activities: one persisted record per user per day, grouping the
 * individual activities together with the day's goals and an aggregate
 * summary. (Mapping annotations suggest a MongoDB/Morphia-style ODM --
 * confirm against the Model base class.)
 *
 * @author Mohammad
 */
public class DailyActivity extends Model {
	/** Document id. */
	@Id
	public ObjectId id;
	/** The individual activities performed on this day (stored by reference). */
	@Reference
	public List<Activity> activities;
	/** The goals for this day (embedded in the document). */
	@Embedded
	public ActivityGoal goals;
	/** Aggregated totals for this day (stored by reference). */
	@Reference
	public ActivitySummary summary;
	/** Id of the user this record belongs to. */
	public String userId;
	/** The calendar date this record covers. */
	public Date date;
	/**
	 * @return a finder for querying {@code DailyActivity} documents by
	 *         their {@link ObjectId}.
	 */
	public static Model.Finder<ObjectId, DailyActivity> find() {
		return new Model.Finder<ObjectId, DailyActivity>(ObjectId.class,
				DailyActivity.class);
	}
}
Oliver Santen, press spokesman for the Allianz Group’s office in the capital, is to become head of PR and deputy company spokesman for Axel Springer AG from 1 June.
He will take over responsibility for all press work for the Berlin-based publisher, while, in the newly created role of deputy company spokesman, he will also support head of corporate communications Edda Fels in corporate and financial communications. He will also be responsible for print and online corporate PR publications.
The current head of PR, Carola Schmidt, will take on the role of head of advertising in the national retail division from 1 May. |
A Joint Signal Detection and Channel Estimation Method in Multi-user MIMO Relay Systems For multi-user multiple-input multiple-output (MIMO) Relay System, this paper develops a joint signal detection and channel estimation method based on Tucker-2 model. First, the proposed method encodes the transmitted information symbols at the source and transmits them to the relay. Then, the relay amplifies the received signals and broadcasts them to all users. Last, the Tucker-2 model is constructed and the iterative fitting algorithm is designed at the user end to realize the joint estimation of signals and channels. Theoretical analysis and simulation results show that the proposed method can effectively detect the information symbols and estimate the channel matrix when channel state information (CSI) is unknown and pilot signal is not used. In addition, compared with the existing algorithms, the proposed method has higher estimation performance and more relaxed application conditions. |
// Prefixes consumes the prefixes, if they are accepted by the policy they are
// forwarded to the registered consumer.
func (f PrefixesFilter) Prefixes(
remote addr.IA,
gateway Gateway,
prefixes []*net.IPNet,
) error {
rp := f.PolicyProvider.RoutingPolicy()
if rp == nil {
return nil
}
var sb netaddr.IPSetBuilder
allowedCount := 0
rejectedCount := 0
for _, prefix := range prefixes {
p, ok := netaddr.FromStdIPNet(prefix)
if !ok {
return serrors.New("can not convert prefix", "prefix", prefix)
}
set, err := rp.Match(remote, f.LocalIA, p)
if err != nil {
return serrors.New("error while filtering prefix", "prefix", prefix, "err", err)
}
sb.AddSet(&set.IPSet)
if len(set.Prefixes()) > 0 {
allowedCount++
} else {
rejectedCount++
}
}
metrics.GaugeSet(metrics.GaugeWith(f.Metrics.PrefixesAccepted,
"remote_isd_as", remote.String()), float64(allowedCount))
metrics.GaugeSet(metrics.GaugeWith(f.Metrics.PrefixesRejected,
"remote_isd_as", remote.String()), float64(rejectedCount))
set, err := sb.IPSet()
if err != nil {
return serrors.New("error while filtering prefixes", "prefixes", prefixes, "err", err)
}
var allowedPrefixes []*net.IPNet
for _, prefix := range set.Prefixes() {
allowedPrefixes = append(allowedPrefixes, prefix.IPNet())
}
return f.Consumer.Prefixes(remote, gateway, allowedPrefixes)
} |
//
// ChatViewCardCell.h
// ECSDKDemo_OC
//
// Created by zhouwh on 16/7/27.
// Copyright © 2016年 ronglian. All rights reserved.
//
#import "ChatViewCell.h"
/* Event identifier associated with this cell's bubble view (presumably
 * posted/handled when the bubble is tapped -- confirm against the
 * implementation and its responder). */
extern NSString *const KResponderCustomChatViewCardCellBubbleViewEvent;
/*
 * A chat cell that renders a contact "card" bubble. Subview roles below
 * are inferred from the property names; verify against ChatViewCardCell.m.
 */
@interface ChatViewCardCell : ChatViewCell
/* Card title label */
@property (nonatomic, strong) UILabel *titleLab;
/* Contact photo / avatar image */
@property (nonatomic, strong) UIImageView *photoImg;
/* Contact name label */
@property (nonatomic, strong) UILabel *nameLab;
/* Contact phone number label */
@property (nonatomic, strong) UILabel *phoneLab;
/* Public account name label */
@property (nonatomic, strong) UILabel *publicNameLab;
@end
|
#include <stdio.h>
#include <iostream>
#include <queue>
#include <utility>
#include <vector>
using namespace std;
#define MOD 1000000007
#define nl '\n'
#define fio \
ios_base::sync_with_stdio(false); \
cin.tie(NULL); \
cout.tie(NULL)
#define ll long long
#define testcase \
int t; \
scanf("%d", &t); \
for (int test = 0; test < t; test++)
// Closed interval [first, second] with a priority ordering:
// longer intervals compare greater; among equal lengths, the interval
// with the smaller left endpoint compares greater.
class p {
public:
    int first;
    int second;
    // Default-construct as the degenerate interval [0, 0].
    p() : first(0), second(0) {}
    p(int a, int b) : first(a), second(b) {}
    bool operator<(const p &other) const {
        // second + b.first == b.second + first  <=>  equal lengths,
        // rewritten here in terms of the interval lengths directly.
        const int myLen = second - first;
        const int otherLen = other.second - other.first;
        if (myLen != otherLen) {
            return myLen < otherLen;
        }
        // Same length: a larger left endpoint means lower priority.
        return first > other.first;
    }
};
int main() {
    testcase {
        int n;
        cin >> n;
        // Max-priority queue of segments; class p ranks longer segments
        // first and, on equal length, the one with the smaller left end.
        priority_queue<p, std::vector<p>, std::less<p>> q;
        // std::vector instead of the previous `int arr[n + 1]` -- VLAs
        // are a non-standard extension in C++.
        std::vector<int> arr(n + 1);
        int count = 0;
        q.push(p(1, n));
        // Repeatedly take the highest-priority segment, label its
        // midpoint with the next counter value, and push the two halves.
        while (!q.empty()) {
            p x = q.top();
            q.pop();
            if (x.first == x.second) {
                // Single-cell segment: label it directly.
                arr[x.first] = ++count;
            } else {
                int mid = (x.first + x.second) / 2;
                arr[mid] = ++count;
                if (mid + 1 <= x.second) {
                    q.push(p(mid + 1, x.second));
                }
                if (mid - 1 >= x.first) {
                    q.push(p(x.first, mid - 1));
                }
            }
        }
        for (int i = 1; i <= n; i++) {
            cout << arr[i] << " ";
        }
        cout << nl;
    }
    return 0;
}
|
/*
* Copyright 2014-2016 Groupon, Inc
* Copyright 2014-2016 The Billing Project, LLC
*
* The Billing Project licenses this file to you under the Apache License, version 2.0
* (the "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.killbill.billing.tenant.api;
import java.util.List;
import java.util.Locale;
import javax.inject.Inject;
import javax.inject.Named;
import org.killbill.billing.ErrorCode;
import org.killbill.billing.callcontext.InternalTenantContext;
import org.killbill.billing.tenant.api.TenantKV.TenantKey;
import org.killbill.billing.tenant.dao.TenantDao;
import org.killbill.billing.tenant.dao.TenantModelDao;
import org.killbill.billing.tenant.glue.DefaultTenantModule;
import org.killbill.billing.util.LocaleUtils;
/**
* This is the private API which is used to extract per tenant objects (catalog, overdue, invoice templates, ..)
* <p/>
* Some of these per tenant objects are cached at a higher level in their respective modules (catalog, overdue) to
* avoid reconstructing the object state from the xml definition each time. As a result, the module also registers
* a callback which is used for the cache invalidation when the state changes and the operation occurred on a remote node.
* For those objects, the private api is called from the module.
* <p/>
* Some others (invoice templates,...) are not cached (yet) and so the logic is simpler.
* <p/>
* The api can only be used to retrieve objects where no caching is required.
*/
public class DefaultTenantInternalApi implements TenantInternalApi {
private final TenantDao tenantDao;
private final TenantCacheInvalidation tenantCacheInvalidation;
@Inject
public DefaultTenantInternalApi(@Named(DefaultTenantModule.NO_CACHING_TENANT) final TenantDao tenantDao,
final TenantCacheInvalidation tenantCacheInvalidation) {
this.tenantDao = tenantDao;
this.tenantCacheInvalidation = tenantCacheInvalidation;
}
@Override
public void initializeCacheInvalidationCallback(final TenantKey key, final CacheInvalidationCallback cacheInvalidationCallback) {
tenantCacheInvalidation.registerCallback(key, cacheInvalidationCallback);
}
@Override
public List<String> getTenantCatalogs(final InternalTenantContext tenantContext) {
return tenantDao.getTenantValueForKey(TenantKey.CATALOG.toString(), tenantContext);
}
@Override
public String getTenantOverdueConfig(final InternalTenantContext tenantContext) {
final List<String> values = tenantDao.getTenantValueForKey(TenantKey.OVERDUE_CONFIG.toString(), tenantContext);
return getUniqueValue(values, "overdue config", tenantContext);
}
@Override
public String getTenantConfig(final InternalTenantContext tenantContext) {
final List<String> values = tenantDao.getTenantValueForKey(TenantKey.PER_TENANT_CONFIG.toString(), tenantContext);
return getUniqueValue(values, "per tenant config", tenantContext);
}
@Override
public String getInvoiceTemplate(final Locale locale, final InternalTenantContext tenantContext) {
final List<String> values = tenantDao.getTenantValueForKey(TenantKey.INVOICE_TEMPLATE.toString(), tenantContext);
return getUniqueValue(values, "invoice template", tenantContext);
}
@Override
public String getManualPayInvoiceTemplate(final Locale locale, final InternalTenantContext tenantContext) {
final List<String> values = tenantDao.getTenantValueForKey(TenantKey.INVOICE_MP_TEMPLATE.toString(), tenantContext);
return getUniqueValue(values, "manual pay invoice template", tenantContext);
}
@Override
public String getInvoiceTranslation(final Locale locale, final InternalTenantContext tenantContext) {
final List<String> values = tenantDao.getTenantValueForKey(LocaleUtils.localeString(locale, TenantKey.INVOICE_TRANSLATION_.toString()), tenantContext);
return getUniqueValue(values, "invoice translation", tenantContext);
}
@Override
public String getCatalogTranslation(final Locale locale, final InternalTenantContext tenantContext) {
final List<String> values = tenantDao.getTenantValueForKey(LocaleUtils.localeString(locale, TenantKey.CATALOG_TRANSLATION_.toString()), tenantContext);
return getUniqueValue(values, "catalog translation", tenantContext);
}
@Override
public String getPluginConfig(final String pluginName, final InternalTenantContext tenantContext) {
final String pluginConfigKey = TenantKey.PLUGIN_CONFIG_ + pluginName;
final List<String> values = tenantDao.getTenantValueForKey(pluginConfigKey, tenantContext);
return getUniqueValue(values, "config for plugin " + pluginConfigKey, tenantContext);
}
@Override
public String getPluginPaymentStateMachineConfig(final String pluginName, final InternalTenantContext tenantContext) {
final String pluginConfigKey = TenantKey.PLUGIN_PAYMENT_STATE_MACHINE_ + pluginName;
final List<String> values = tenantDao.getTenantValueForKey(pluginConfigKey, tenantContext);
return getUniqueValue(values, "payment state machine for plugin " + pluginConfigKey, tenantContext);
}
@Override
public List<String> getTenantValuesForKey(final String key, final InternalTenantContext tenantContext) {
return tenantDao.getTenantValueForKey(key, tenantContext);
}
@Override
public Tenant getTenantByApiKey(final String key) throws TenantApiException {
final TenantModelDao tenant = tenantDao.getTenantByApiKey(key);
if (tenant == null) {
throw new TenantApiException(ErrorCode.TENANT_DOES_NOT_EXIST_FOR_API_KEY, key);
}
return new DefaultTenant(tenant);
}
private String getUniqueValue(final List<String> values, final String msg, final InternalTenantContext tenantContext) {
if (values.isEmpty()) {
return null;
}
if (values.size() > 1) {
throw new IllegalStateException(String.format("Unexpected number of values %d for %s and tenant %d",
values.size(), msg, tenantContext.getTenantRecordId()));
}
return values.get(0);
}
}
|
# Copyright 2015 tsuru-circus authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from tornado.testing import gen_test
from circus.tests.support import TestCircus, async_poll_for
from circus.util import DEFAULT_ENDPOINT_DEALER, DEFAULT_ENDPOINT_SUB
from circus.util import tornado_sleep
from tsuru.plugins.statsd import Stats, storages
from tsuru.plugins.stats.fake import FakeBackend
from tsuru.plugins.stats.statsd import StatsdBackend
from tsuru.plugins.stats.logstash import LogstashBackend
from mock import patch, Mock
import tornado
from time import time
import multiprocessing
import functools
import os
def run_plugin(cls, config, plugin_info_callback=None, duration=300,
               endpoint=DEFAULT_ENDPOINT_DEALER, pubsub_endpoint=DEFAULT_ENDPOINT_SUB):
    """Instantiate and run one circus plugin for roughly ``duration`` ms.

    :param cls: plugin class to instantiate.
    :param config: extra keyword arguments forwarded to the plugin ctor.
    :param plugin_info_callback: optional callable invoked with the plugin
        after its loop has finished (used here to extract collected state).
    :param duration: run time in milliseconds before ``plugin.stop`` fires.
    :param endpoint: circus dealer endpoint.
    :param pubsub_endpoint: circus pub/sub endpoint.
    :return: the plugin's storage backend.
    """
    check_delay = 1
    ssh_server = None
    plugin = cls(endpoint, pubsub_endpoint, check_delay, ssh_server, **config)
    # Stop the storage backend up-front — presumably so stats accumulate
    # in memory without a live backend connection (TODO confirm).
    if hasattr(plugin, 'storage'):
        plugin.storage.stop()
    # Schedule plugin.stop() so that plugin.start() — which presumably
    # blocks on the plugin's event loop — returns after ``duration`` ms.
    deadline = time() + (duration / 1000.)
    plugin.loop.add_timeout(deadline, plugin.stop)
    plugin.start()
    try:
        if plugin_info_callback:
            plugin_info_callback(plugin)
    finally:
        plugin.stop()
    return plugin.storage
@tornado.gen.coroutine
def async_run_plugin(cls, config, plugin_info_callback, duration=300,
                     endpoint=DEFAULT_ENDPOINT_DEALER, pubsub_endpoint=DEFAULT_ENDPOINT_SUB):
    """Run ``run_plugin`` in a child process and yield the callback's result.

    ``plugin_info_callback`` receives a multiprocessing queue as its first
    argument (via functools.partial) and is expected to put exactly one
    item on it; that item becomes the value of this coroutine.
    """
    queue = multiprocessing.Queue()
    plugin_info_callback = functools.partial(plugin_info_callback, queue)
    circusctl_process = multiprocessing.Process(
        target=run_plugin,
        args=(cls, config, plugin_info_callback, duration, endpoint, pubsub_endpoint))
    circusctl_process.start()
    # Poll without blocking the IOLoop until the child posts its result.
    while queue.empty():
        yield tornado_sleep(.1)
    result = queue.get()
    raise tornado.gen.Return(result)
def get_gauges(queue, plugin):
    """Push the plugin's accumulated gauges onto ``queue``.

    Used as the ``plugin_info_callback`` for ``async_run_plugin``; the
    parent process reads the gauges back from the queue.
    """
    gauges = plugin.storage.gauges
    queue.put(gauges)
class TestStats(TestCircus):
    """Tests for the tsuru Stats circus plugin and its storage backends."""

    @gen_test
    def test_stats(self):
        # End-to-end: run the plugin against a live arbiter and check that
        # a full batch of per-watcher gauges is reported.
        envs = {
            "TSURU_METRICS_BACKEND": "fake",
        }
        os.environ.update(envs)
        dummy_process = 'circus.tests.support.run_process'
        yield self.start_arbiter(dummy_process)
        async_poll_for(self.test_file, 'START')
        config = {'loop_rate': 0.2}
        stats_class = Stats
        # Stub the host-level probes so results do not depend on the
        # machine running the tests.
        stats_class.disk_usage = lambda x: 0
        stats_class.net_io = lambda x: (0, 0)
        stats_class.connections_established = lambda x: 0
        stats_class.connections = lambda x: []
        gauges = yield async_run_plugin(
            stats_class, config,
            plugin_info_callback=get_gauges,
            duration=1000,
            endpoint=self.arbiter.endpoint,
            pubsub_endpoint=self.arbiter.pubsub_endpoint
        )
        # we should have a bunch of stats events here
        self.assertTrue(len(gauges) >= 5)
        # The last batch must be exactly the five per-watcher aggregates.
        last_batch = sorted(name for name, value in gauges[-5:])
        wanted = ['test.cpu_sum', 'test.mem_max',
                  'test.mem_pct_max', 'test.mem_pct_sum',
                  'test.mem_sum']
        self.assertEqual(last_batch, wanted)
        yield self.stop_arbiter()

    @patch("psutil.disk_usage")
    def test_disk_usage(self, disk_usage_mock):
        # disk_usage() should query the root filesystem.
        stats = Stats("endpoint", "pubsub", 1.0, "ssh_server")
        stats.disk_usage()
        disk_usage_mock.assert_called_with("/")

    @patch("psutil.net_io_counters")
    def test_net_io(self, net_io_mock):
        # net_io() should delegate to psutil with default arguments.
        stats = Stats("endpoint", "pubsub", 1.0, "ssh_server")
        stats.net_io()
        net_io_mock.assert_called_with()

    @patch("psutil.net_connections")
    def test_connections_established(self, conn_mock):
        # All mocked connections are ESTABLISHED, so all five are counted.
        conn = Mock(status="ESTABLISHED")
        conn_mock.return_value = [conn, conn, conn, conn, conn]
        stats = Stats("endpoint", "pubsub", 1.0, "ssh_server")
        established = stats.connections_established()
        self.assertEqual(5, established)
        conn_mock.assert_called_with("tcp")

    def test_get_storage(self):
        # The backend named by TSURU_METRICS_BACKEND should be selected.
        envs = {
            "TSURU_METRICS_BACKEND": "fake",
        }
        os.environ.update(envs)
        stats = Stats("endpoint", "pubsub", 1.0, "ssh_server")
        storage = stats.get_storage()
        self.assertIsInstance(storage, FakeBackend)

    def test_statsd_default_storage(self):
        # Without TSURU_METRICS_BACKEND, statsd is the default backend.
        del os.environ["TSURU_METRICS_BACKEND"]
        stats = Stats("endpoint", "pubsub", 1.0, "ssh_server")
        storage = stats.get_storage()
        self.assertIsInstance(storage, StatsdBackend)

    def test_logstash_registered(self):
        # The logstash backend must be available in the storages registry.
        self.assertIn("logstash", storages)
        self.assertIsInstance(storages["logstash"](), LogstashBackend)

    def test_get_a_not_registered_backend(self):
        envs = {
            "TSURU_METRICS_BACKEND": "doesnotexist",
        }
        os.environ.update(envs)
        stats = Stats("endpoint", "pubsub", 1.0, "ssh_server")
        storage = stats.get_storage()
        # when a backend does not exists the default should be returned
        self.assertIsInstance(storage, StatsdBackend)
|
Importance of viruses and Legionella pneumophila in respiratory exacerbations of young adults with cystic fibrosis. From January to April 1980 46 young adults with cystic fibrosis were studied for evidence of infection with a wide variety of microorganisms, including viruses and Legionella pneumophila. Two groups of patients were investigated: a "deteriorated" group of 24 patients who had experienced an increase in lower respiratory tract symptoms and fall in lung function values in the course of one month before the start of the study and a "stable" group of 22 patients with no such deterioration. All serological tests were repeated at one month and then one year after the beginning of the study. A fourfold rise in titres of antibodies to various viruses, Mycoplasma pneumoniae, and Coxiella burnetii was obtained in seven (29%) of the deteriorated group but in only one (4.5%) of the stable group (p less than 0.05). One other patient showed a fourfold rise in L pneumophila antibody titre (on the basis of the indirect fluorescent antibody test), which was accompanied by a respiratory illness consistent with legionnaires' disease. Eight of the 46 patients (17.4%) had demonstrable titres of antibody against L pneumophila (1/32 or above). |
Madonna is donating toward the construction of a new youth boxing gym in Detroit and buying iPads and other supplies for students at a city charter school.
In a news release Monday, the 55-year-old music icon says her contributions to three organizations represent "the first phase of a long-term commitment" to her hometown.
Madonna says a recent visit to Detroit left her "inspired by the progress she's witnessed." The Rock and Roll Hall of Famer grew up in Bay City and Rochester Hills.
She plans to provide funding for a new facility for the Downtown Youth Boxing Gym; purchase new supplies for the Detroit Achievement Academy; and help out The Empowerment Plan, which employs homeless women to sew garments that are distributed to the homeless. |
// a recursive method that counts the number of map entries
@SuppressWarnings("unchecked")
private void traverse (Map<Object, Object> m)
{
Object key=null;
Iterator<?> i = m.keySet().iterator();
while (i.hasNext())
{
key = i.next();
Object entry = m.get(key);
if (entry instanceof Map)
traverse((Map<Object, Object>)entry);
else
objCount++;
}
} |
# Python 2 script: reads an integer n, then n integers on one line.
n = int(raw_input())
a = raw_input().split()
# Parse the whitespace-separated tokens into ints and sort ascending.
b = []
for each in a:
    b.append(int(each))
b.sort()
summa = 0
# Single element: the answer is the element itself.
if n==1: print b[0]
else:
    # Weighted sum over the sorted values: the i-th smallest (0-based)
    # contributes b[i] * (i + 2), except the two largest values, which
    # each contribute value * n.
    for i in range(0, (len(b)-2)):
        summa += b[i]*(i+2)
    summa += (b[len(b)-2] + b[len(b)-1])*n
    print summa
// Package workgroup provides synchronization for groups of related goroutines.
package workgroup
// RunFunc is a function to execute with other related functions in its own goroutine.
// The closure of the channel passed to RunFunc should trigger return.
type RunFunc func(<-chan struct{}) error

// Group is a group of related goroutines.
// The zero value for a Group is fully usable without initialization.
type Group struct {
	fns []RunFunc // functions registered via Add; each runs in its own goroutine in Run
}
// Add adds a function to the Group.
// The function will be executed in its own goroutine when Run is called.
// Add must be called before Run.
func (g *Group) Add(fn RunFunc) {
	g.fns = append(g.fns, fn)
}
// Run executes each function registered via Add in its own goroutine.
// Run blocks until all functions have returned, then returns the first non-nil error (if any) from them.
// The first function to return will trigger the closure of the channel passed to each function, which should in turn, return.
func (g *Group) Run() error {
	if len(g.fns) == 0 {
		return nil
	}
	// stop is closed after the first function returns, signalling the rest to exit.
	stop := make(chan struct{})
	// done is buffered so every goroutine can send its result without blocking,
	// even if Run has already returned by the time a late goroutine finishes.
	done := make(chan error, len(g.fns))
	defer close(done)
	for _, fn := range g.fns {
		go func(fn RunFunc) {
			done <- fn(stop)
		}(fn)
	}
	var err error
	// Drain one result per goroutine, keeping the first non-nil error seen.
	for i := 0; i < cap(done); i++ {
		if err == nil {
			err = <-done
		} else {
			<-done
		}
		if i == 0 {
			// First result received: ask all remaining functions to stop.
			close(stop)
		}
	}
	return err
}
|
Rachel Brand. Photo: Tom Williams/CQ-Roll Call,Inc.
Robert Mueller’s investigation enjoys some independence from the Justice Department bureaucracy. But not that much independence: The DOJ retains the power to determine the amount of human and material resources the special counsel has at his disposal — and, crucially, whether to ultimately pursue prosecutions based on what he finds.
Since Attorney General Jeff Sessions recused himself from all matters involving the Trump campaign and Russia, those powers now rest with Deputy Attorney General Rod Rosenstein — the official who decided to appoint a special prosecutor in the first place.
But Rosenstein may be forfeiting such authority in short order. On Friday morning, the president publicly suggested that his deputy attorney general had joined a conspiracy against him.
Earlier in the week, we learned that Trump has discussed firing Mueller with his friends and aides. In the wake of such reports, Rosenstein assured the Senate that he would not fire the special counsel absent good cause, no matter who ordered him to do so.
In other words: Trump probably needs to get rid of Rosenstein if he wants to get rid of Mueller. And if the president was tempted to oust the special counsel on Monday, he’s sure to be all the more tempted now: In the last 72 hours, the Washington Post and the New York Times reported that the special counsel was looking into whether the president’s firing of Comey constituted obstruction of justice, while the Post also revealed that Mueller is examining Jared Kushner’s financial dealings with Russian entities.
But even if Trump decides he’s obstructed enough justice for the time being, Rosenstein’s days overseeing the Russia probe may still be numbered. The deputy attorney general recently informed colleagues that he may have to recuse himself from the Russia investigation, ABC News reported Friday.
This makes some sense. If Mueller is, in fact, investigating Trump for obstruction of justice, Rosenstein is likely to be a person of interest in that inquiry. After all, Rosenstein (perhaps unwittingly) supplied the White House with its initial rationale for Comey’s ouster, in the form of a memo criticizing the FBI director’s handling of the Clinton email investigation.
Regardless, if Rosenstein goes, by Trump’s hand or his own, the fate of the Mueller investigation — and, plausibly, this sorry republic — will rest with associate attorney general Rachel Brand.
Brand boasts the quintessential résumé for a GOP Justice Department appointee. A graduate of Harvard Law School, where she was active in the arch-conservative Federalist Society, Brand clerked for Supreme Court justice Anthony Kennedy before joining a white-shoe law firm, lending a hand to Elizabeth Dole’s presidential campaign, and then taking a job in the George W. Bush administration.
During the Bush years, Brand first worked under White House counsel Alberto Gonzales (where she may have learned a thing or two about politicizing law enforcement), and then in the Justice Department’s Office of Legal Policy.
Once Bush gave up steering America into epochal domestic and foreign policy crises for watercolor painting, Brand returned to the private sector. There, she helped big business contest mettlesome regulatory standards as chief counsel for regulatory litigation for the U.S. Chamber Litigation Center.
Brand was also one of 2,604 Iowans to set the Guinness world record for the largest Dutch “klompen” dance seven years ago, according to the Washington Post profile of the associate attorney general from late last month.
So Brand’s background is a bit more partisan — and decidedly more right wing — than Rosenstein’s. That said, Lawfare’s Benjamin Wittes, a Comey confidant who takes Mueller’s investigation exceptionally seriously, has confidence in Brand’s independence.
I think very highly of Rachel, who is a friend, a patriot, and a person in whom I have confidence.
Seems like there’s a decent chance she ends up fired, too. |
<reponame>Ben-Mann/NanoHorizon
/**
* MPU6050 IMU setup & read on the Arduino Nano / ATmega328P
*/
#include "mpu.h"
#include <Wire.h>
#define I2CMPU 0x68
#define MPU_ACCEL 0x3B
#define MPU_TEMP 0x41
#define MPU_PWR_MGMT_1 0x6B
#define ACCEL_CONFIG 0x1C
#define AFS_SEL_16G 0x18
#define LSB_PER_G_16G 2048.0f
// Latest accelerometer reading in g (updated by mpuGet).
Vector mpuAccel = Vector();
// Latest die temperature in degrees Celsius (updated by mpuGet).
float mpuTemp;

/**
 * Wake the MPU6050 out of its power-on sleep mode and select the
 * +/-16g accelerometer full-scale range.
 */
void mpuSetup() {
  Wire.begin();

  Wire.beginTransmission(I2CMPU);
  Wire.write(MPU_PWR_MGMT_1);
  Wire.write(0x00); // Clear sleep bit; the MPU6050 starts in sleep mode.
  Wire.endTransmission(true);

  Wire.beginTransmission(I2CMPU);
  Wire.write(ACCEL_CONFIG);
  Wire.write(AFS_SEL_16G);
  // Fix: end this configuration write with a STOP (true) rather than a
  // repeated START (false) — no follow-up transfer happens until mpuGet(),
  // so the bus must be released here.
  Wire.endTransmission(true);
}
/**
 * Read the three accelerometer axes (into mpuAccel, in g) and the die
 * temperature (into mpuTemp, in degrees Celsius) from the MPU6050.
 */
void mpuGet() {
  // Select the first accelerometer register; repeated START (false) keeps
  // the bus for the immediately following read.
  Wire.beginTransmission(I2CMPU);
  Wire.write(MPU_ACCEL);
  Wire.endTransmission(false);

  // Per the datasheet the registers hold big-endian two's-complement
  // 16-bit values. Fix: combine the bytes into int16_t before scaling so
  // negative readings sign-extend correctly — the original plain-int
  // arithmetic produced values in 0..65535 and broke negative axes.
  Wire.requestFrom(I2CMPU, 6, true);
  mpuAccel.x = (int16_t)(Wire.read() << 8 | Wire.read()) / LSB_PER_G_16G;
  mpuAccel.y = (int16_t)(Wire.read() << 8 | Wire.read()) / LSB_PER_G_16G;
  mpuAccel.z = (int16_t)(Wire.read() << 8 | Wire.read()) / LSB_PER_G_16G;

  Wire.beginTransmission(I2CMPU);
  Wire.write(MPU_TEMP);
  Wire.endTransmission(false);
  Wire.requestFrom(I2CMPU, 2, true);
  // Formula from datasheet (raw value is two's complement as well).
  mpuTemp = (int16_t)(Wire.read() << 8 | Wire.read()) / 340.0f + 36.53f;
}
<reponame>elegantJava/iptv_sas<filename>src/main/java/com/hgys/iptv/model/User.java
package com.hgys.iptv.model;
import com.fasterxml.jackson.annotation.JsonIgnore;
import lombok.Data;
import lombok.NoArgsConstructor;
import org.hibernate.annotations.DynamicInsert;
import org.hibernate.annotations.DynamicUpdate;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.core.userdetails.UserDetails;
import javax.persistence.*;
import java.io.Serializable;
import java.sql.Timestamp;
import java.util.Collection;
/**
* @Author: wangzhen
* @Date:2019/4/19 16:26
*/
@Entity
@Table(name="sys_user")
@DynamicInsert
@DynamicUpdate
@Data
@NoArgsConstructor
// JPA entity for table sys_user; also serves as the Spring Security principal.
public class User implements Serializable, UserDetails {
    @Id @GeneratedValue(strategy = GenerationType.IDENTITY)
    @Column(name = "id", unique = true, nullable = false, length = 11)
    private Integer id;

    @Column(name = "username", unique = true, nullable = false, length = 200)
    private String username; // login name

    @Column(name = "realName", length = 200) // user's real name
    private String realName;

    private Integer cpId; // -1 = platform user
    private String cpAbbr;

    @Column(name = "password", nullable = false, length = 200)
    @JsonIgnore // never expose the password in JSON responses
    private String password;

    private String email;
    private String telephone; // landline number
    private String mobilePhone; // mobile number
    private Timestamp createdTime;
    private Timestamp modifyTime;

    @Column(name = "status",length = 2)
    private Integer status; // 0: enabled, 1: disabled
    private Integer isdelete; // 0: not deleted, 1: deleted

    // @ElementCollection(targetClass = SystemUserRole.class, fetch = FetchType.EAGER)
    // @Enumerated(EnumType.STRING)
    // user-role == many-to-many; this side maintains the relationship
    // @ManyToMany(cascade = CascadeType.PERSIST,fetch= FetchType.EAGER) // EAGER: load the data from the database immediately
    // @JoinTable(name = "sys_user_role", joinColumns = { @JoinColumn(name = "user_id") }, inverseJoinColumns ={@JoinColumn(name = "role_id") })
    // private Set<Role> roles = new HashSet<>();

    @Override
    public boolean isAccountNonExpired() {
        return true; // accounts never expire
    }

    @Override
    public boolean isAccountNonLocked() {
        return true; // accounts are never locked
    }

    @Override
    public boolean isCredentialsNonExpired() {
        return true; // credentials never expire
    }

    @Override
    public boolean isEnabled() {
        // NOTE(review): returning false marks EVERY account as disabled to
        // Spring Security, and the 'status' field is ignored here — confirm
        // whether this should derive from 'status' instead.
        return false;
    }

    @Override
    public Collection<? extends GrantedAuthority> getAuthorities() {
        // NOTE(review): returns null rather than an empty collection; verify
        // that callers tolerate a null authorities list.
        return null;
    }

    // public void setPassword(String password) {
    //     this.password = <PASSWORD>(password);
    // }
}
|
// Copyright 2019 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.analysis;
import com.google.common.collect.ImmutableList;
import com.google.devtools.build.lib.actions.Artifact;
import com.google.devtools.build.lib.analysis.actions.FileWriteAction;
import com.google.devtools.build.lib.vfs.PathFragment;
/** The class for constructing command line for Bash. */
public final class BashCommandConstructor implements CommandConstructor {

  private final PathFragment shellPath;
  private final String scriptNameSuffix;

  BashCommandConstructor(PathFragment shellPath, String scriptNameSuffix) {
    this.scriptNameSuffix = scriptNameSuffix;
    this.shellPath = shellPath;
  }

  /** Returns the argv that executes the given script file via the configured shell. */
  @Override
  public ImmutableList<String> asExecArgv(Artifact scriptFileArtifact) {
    String shell = shellPath.getPathString();
    return ImmutableList.of(shell, scriptFileArtifact.getExecPathString());
  }

  /** Returns the argv that runs {@code command} inline via the shell's -c flag. */
  @Override
  public ImmutableList<String> asExecArgv(String command) {
    String shell = shellPath.getPathString();
    return ImmutableList.of(shell, "-c", command);
  }

  /** Materializes {@code command} as an executable bash script artifact. */
  @Override
  public Artifact commandAsScript(RuleContext ruleContext, String command) {
    String name = ruleContext.getTarget().getName() + scriptNameSuffix;
    String contents = "#!/bin/bash\n" + command;
    return FileWriteAction.createFile(ruleContext, name, contents, /*executable=*/ true);
  }
}
|
Follow us on Instagram for the cars and the noise.
Follow us on Instagram for the cars and the noise. Fancy Kristen is at Goodwood Revival! |
Great Wall of Gorgan
Name
Among archaeologists the wall is also known as "The Red Snake" because of the colour of its bricks. In Persian, it was popularized by the name "Alexander Barrier" (سد اسکندر Sadd-i-Iskandar) or "Alexander's Wall", as Alexander the Great is thought by early Muslims to have passed through the Caspian Gates on his hasty march to Hyrcania and the east. It is also known as the "Anushirvân Barrier" (سد انوشیروان) and "Firuz/Piruz Barrier" (سد پیروز), and is officially referred to as "Gorgan Defence Wall" (دیوار دفاعی گرگان). It is known as Qïzïl Yïlan or Qazal Al'an to local Iranian Turkmens.
Description
The barrier consists of a wall, 195 km (121 mi) long and 6–10 m (20–33 ft) wide, with over 30 fortresses at intervals of between 10 and 50 km (6.2 and 31.1 mi).
The building materials consist of mud-brick, fired brick, gypsum, and mortar. Clay was also used during the early Parthian period. Mud-bricks were more popular in the early period in the construction of forts and cities, while fired bricks became popular in the later period. Sometimes one brick was set in the vertical position, with two horizontal rows of bricks laid above and below. The sizes of mud or fired bricks differ, but in general the standard size was 40 × 40 × 10 cm. The fired bricks were made from the local loess soil, and fired in kilns along the line of the wall.
This wall starts from the Caspian coast, circles north of Gonbade Kavous (ancient Gorgan, or Jorjan in Arabic), continues towards the northeast, and vanishes in the Pishkamar Mountains.
The wall lies slightly to the north of a local river, and features a 5 m (16 ft) ditch that conducted water along most of the wall.
In 1999 a logistical archaeological survey was conducted regarding the wall due to problems in development projects, especially during construction of the Golestan Dam, which irrigates all the areas covered by the wall. At the point of the connection of the wall and the drainage canal from the dam, architects discovered the remains of the Great Wall of Gorgan. The 40 identified castles vary in dimension and shape but the majority are square fortresses, made of the same brickwork as the wall itself and at the same period. Due to many difficulties in development and agricultural projects, archaeologists have been assigned to mark the boundary of the historical find by laying cement blocks.
Larger than Hadrian's Wall and the Antonine Wall taken together (two separate structures in Britain that marked the northern limits of the Roman Empire), it has been called the greatest monument of its kind between Europe and China. The wall is second only to the walls that make up the Great Wall of China as the longest defensive wall in existence, and although now in substantial disrepair, it was perhaps even more solidly built than the early forms of the Great Wall.
Dating
Dr. Kiani, who led the archaeological team in 1971, believed that the wall was built during the Parthian Empire (247 BC–224 AD), and that it was reconstructed and restored during the Sassanid era (3rd to 7th century AD). In 2005 a team excavated samples of charcoal from the many brick kilns along the wall, and samples from the Gorgan Wall and the smaller Wall of Tammishe (location of a drowned fort at the northern end: 36°48.595′N 54°1.234′E; location of a fortlet or watchtower at the inland end: 36°43.360′N 54°3.675′E); OSL and radiocarbon dating indicated a date for both walls in the late 5th or 6th century AD. These dates suggest that the current wall, at least, is Sassanid rather than Parthian, and that the current structure did not yet exist, some 800 years earlier, in the time of Alexander the Great (died 323 BC). If Alexander encountered a barrier at this location it was a predecessor of the current wall.
If we assumed that the forts were occupied as densely as those on Hadrian's Wall, then the garrison on the Gorgan Wall would have been in the order of 30,000 men. Models, taking into account the size and room number of the barrack blocks in the Gorgan Wall forts and likely occupation density, produce figures between 15,000 and 36,000 soldiers. Even the lowest estimate suggests a strong and powerful army, all the more remarkable as our investigations focused just on 200 km of vulnerable frontier, a small fraction of the thousands of kilometres of borders of one of the ancient world's largest empires.
Derbent Caspian Gate
A similar Sasanian defence wall and fortification lies on the opposite, western, side of the Caspian Sea at the port of Derbent, in the Republic of Dagestan, Russia. There the remains of a line of fortifications run inland for some 3 km (1.9 mi) from the shore of the Caspian Sea (42.062643°N 48.307185°E) to what is today an extraordinarily well preserved Sassanian fort (42.052840°N 48.274230°E) on the first foothills of the Caucasus mountains.
Derbent and its Caspian Gates are at the western part of the historical region of Hyrcania. While the fortification and walls on the east side of the Caspian Sea remained unknown to the Graeco-Roman historians, the western half of the impressive "northern fortifications" in the Caucasus were well known to Classical authors. |
On a New Outlier Rejection Technique This paper presents a novel approach to reject outliers based on parametric measurements, namely quiescent current and IC speed estimates. Unlike the existing multi-parameter or IDDQ-based techniques which use additional measurements to estimate the expected healthy IDDQ response, the authors focus on the variance to reject outliers. The authors take advantage of the correlation between the two types of measurements and of the fact that speed estimate measurements are more reliable than leakage ones in order to evaluate the acceptable current variation level. For that, the authors derive approximate variance equations from basic current and delay equations. Using these variance equations, the authors are able to identify current variations that cannot be justified by normal process variations, based on simple outlier rejection criteria. This approach is developed to be used as a pre-processing tool to other post-processing techniques. Results are promising. |
What’s a Few Hundred Billion Between Friends?
As Krugman points out, the amount of revenue forgone by lifting the top bracket from $250k to $450k is practically a rounding error, at least in budget terms. And while tax rates on investment income (capital gains, dividends) are still too low, at a time when the Fed is trying to use asset inflation as a backdoor out of the liquidity trap, should fiscal policy really be pushing in precisely the opposite direction?
More to the point, it escapes me why people who claim to understand that the primary economic risk right now is austerity, not a lack of it, are howling for tax increases—on anybody.
If you need to have something in the deal to be indignant about, how about the payroll tax hike? Not only will it put more than a million people out of work, the tax itself is about the most regressive and destructive one imaginable. In a sane society we’d be talking about how to replace it, not raise it. But I digress.
Look, I understand the argument for raising the mortality rate on the Bush tax cuts: Over the long run, the federal government needs that money to safeguard the social safety net and keep the deficit boogey monster at bay.
Leaving aside the fact that the deficit monster (even the king-sized version allegedly lurking in the out years) might be a lot less scary than our indoctrinated media portends, the bottom line is I’m with Keynes: In the long run, we’re all dead. That being the case, I don’t think we should sacrifice the economically vulnerable to our fears for the future, including our fear that taxes might be hard to raise in that future.
The idea that the remaining Bush tax cuts are now “permanent” may be clever GOP marketing spin, but that doesn’t make it true. Does anything about the fiscal history of the United States, or even the post-Reagan era, suggest marginal tax rates are immutably carved in stone? Nothing is permanent—“nothing is written,” to quote Lawrence of Arabia (or actually, Peter O’Toole playing Lawrence of Arabia).
Recreating a Party of the Left
In that sense, I guess maybe I don’t stand with Keynes: I plan on sticking around for awhile, and I suspect most of you do, too. And, like you, I intend to go on fighting for the society I want my children—and their children, if I’m lucky—to live in.
That being the case, how you feel about this deal—or the debt limit deal to come, which almost certainly will smell even ranker—logically should depend on how immediate you think the harm will be, and what the chances are for fixing the bad stuff before it seriously damages the social safety net.
That, in turn, should depend on where you think we stand in the political cycle that more or less began with the election of Ronald Reagan almost a third of a century ago.
The point I’m getting to, at last, is that this is about much more than Obama and his alleged sociopathology. This is about the future of the Democratic Party, and, for that matter, about what we dare to call the progressive movement—which, let’s be honest, to past generations of progressives, wouldn’t look very progressive at all.
Conservative apostate Bruce Bartlett (last seen in this space getting his hide ripped off by Markos for daring to compare the modern Democratic Party to the segregationist Dixiecrat party of old) puts it this way:
the nation no longer has a party of the left, but one of the center-right [i.e. the Democrats] that is akin to what were liberal Republicans in the past—there is no longer any such thing as a liberal Republican—and a party of the far right.
Can anyone deny this? It’s not even new: Readers of Bob Woodward’s book about the Clinton economic plan, may recall the scene where Bob Rubin informs the president that the plan is for him to kneel down and suck Wall Street’s dick (instead of the other way around), and an exasperated Clinton shouts: “I hope you're all aware we’re the Eisenhower Republicans here!”
Truer words never spoken. And it took a bunch of guys like Newt Gingrich, Dick Armey and Tom DeLay to make us all fully appreciate the enlightened liberalism of Dwight D. Eisenhower.
But I’m here to argue that the “liberal Republicanization” of the Democratic Party, currently on full display in the ongoing fiscal talks, is actually evidence of success, not failure. It is a success that could—not certainly, not probably, but just possibly—lead to a more progressive future. But getting from here to there obviously isn’t going to be easy.
Obama’s Not the One
My analysis starts with the observation that there are some striking similarities between the current political cycle (the Age of Reagan) and the previous one (the Age of Roosevelt).
A short-hand way of explaining those similarities—and their significance—would be to say that I look at Obama as the Democratic Nixon.
I realize that probably doesn’t go down well with the Obama fans out there, so let me add immediately that it isn’t meant to be taken literally. Nixon really was a sociopath, if not a psychopath—a criminal of monstrous dimensions (See: Hanoi, 1972 Christmas bombing of). And that’s not even bringing Watergate into the discussion.
Unless Michelle Bachmann’s paranoid fantasies about Solyndra are actually true, or the drone program is much worse than we now know, Obama isn’t even close to being in Nixon’s league. He actually seems to be a pretty good guy, for an Emperor.
But in the current political cycle, Obama sits right there in Tricky Dick’s spot—after the Democratic Eisenhower (Clinton) but before the Democratic Reagan, i.e. the one who will free the Matrix and bring balance to the force.
A long time ago, back at my old blog, I wrote a couple of long posts about the dialectics of American politics—the back-and-forth flow of power between the two major parties—and how the parties themselves are constantly being changed thereby.
This reflects the reality is that in a democratic (well, quasi-democratic) system, victories and defeats, even big ones, are never final. Rather, they set in motion the partisan changes that eventually drive the next cycle.
In the American system, this process can be very deceptive, since it usually (but not always—just ask the Whigs) results in the transformation of the two existing parties, rather than their replacement by new ones.
So it has been both in the Age of Roosevelt and the Age of Reagan.
The coming of the New Deal hived off big chunks of the old Republican coalition (Midwestern farmers, small business owners, blacks), turning the Democrats into the majority party. To survive, the GOP was forced to adapt, and did so by becoming the “New Deal Lite” party, much to the fury of its own conservative base. The moderates—i.e. the Eisenhower Republicans—prevailed because they knew how to win elections in a typically hostile environment.
But majorities are inherently fractious and hard to manage—and even harder to reform. Through the ‘60s and ‘70s, the competing demands of old and new Democratic constituencies first undermined and then shattered the New Deal coalition.
The GOP, meanwhile, grew increasingly receptive both to conservative ideas (in an era of New Deal failures, being the New Deal Lite party was no longer advantageous) and to disaffected Democrats, especially those who could be persuaded to overlook their economic interests with cultural and/or racial appeals.
Once Reagan had consolidated these forces, the way was clear for the GOP to demolish the Democrats in the 1980 and 1984 elections and emerge as the new majority party.
The Nixon Transition
But between Eisenhower and Reagan there was Nixon: By political pedigree, an Eisenhower Republican (Ike’s veep), but by personal style a conservative—not least in the way he drove the liberals of the time absolutely ape shit.
A transitional figure, in other words, not a transformative one—acceptable to both wings of his party at key moments (like the 1968 primaries), but not really trusted by either of them.
In terms of domestic policy, economic policy in particular, Nixon was also something of an enigma—in part because he didn’t care much about it, but also because he and his political team were wary of alienating the Democratic crossovers who had put him in the White House. This meant he lacked the political strength to challenge an orthodox Democratic Congress, as Reagan would and could do a decade later.
On the other hand, Nixon’s rhetoric very much prefigured Reagan in its contempt for Ivy Leaguers, liberal bureaucrats, and welfare queens—the Satanic trinity of the conservative imagination. And he wasn’t entirely impotent when it came to policy: He was able to kill or cripple the more progressive, activist elements of LBJ’s Great Society program—but only because conservative Democrats helped him do it.
If you flip that story over, so that you’re looking at the reverse image, you can see the Democratic Party, and its leader, as they stand today.
It definitely isn’t the party of my childhood, which rested on a four-legged stool of industrial unions, minorities, Southern whites, and a smattering of liberal professionals. The first leg has been whittled to a toothpick by globalization and white flight, the second is now painted in rainbow colors, not basic black, the third detached and joined itself to the GOP's stool, while the fourth has grown by leaps and bounds—almost to the point where the party now looks more like a leg of the stool that educated and affluent liberals sit on, rather than the other way around.
On the other hand, those changes also put the Democrats in a position once again to credibly claim to be the nation’s majority party—on cultural and demographic grounds, at least.
Between Past and Future: Obama in the Middle
But the process isn’t complete, and the Democrats still bear the scars of their years in the minority, just as the GOP did in Nixon’s time. The formerly GOP-leaning suburbs that voted for Obama twice are still largely Republican in their local politics, and battlegrounds at the congressional level. To a certain extent, Democrats are still looking over their shoulder, nervous that their new suburban supporters will abandon them.
These are the political realities that have shaped Obama’s presidency, and his economic positions. The Democratic Party has always had a fiscally conservative wing, sympathetic with (if not wholly owned by) Wall Street. The influx of affluent professionals into the party, and the parallel collapse in labor power, have greatly empowered that wing.
Where we stand now isn’t exactly the inverse of the positions of the two parties in Nixon’s day (history may rhyme; it never repeats) but it’s close enough for government work, so to speak.
This is why it shouldn’t come as a big surprise that Obama, the transitional leader of a coalition in transition, is governing from the center or even the center right, his progressive campaign rhetoric notwithstanding—just as Nixon often appeared to be governing from the center or center left, his conservative rhetoric notwithstanding.
But it’s also important to recognize that Obama, for all his policy centeredness, still represents a break with the “Eisenhower Republican” phase of the Democratic Party revival.
Whatever you think of Obamacare, it marked a shift to the left from the Clinton program (or at least the Clinton accomplishments). So did the stimulus, Dodd-Frank, the auto bailout, and the Obama NLRB.*
*It's funny how almost all of the recent coverage of the NLRB that I can find on Google is from the right-wing press. Don't progressives care about this stuff any more?
These weren't just products of the financial meltdown (after all, the Democratic Congress that approved them was elected two years before the crisis hit). They were early signs that the political cycle has turned.
Emphasis here on the word "early." Every political trend has its counterfactuals. The 1974 post-Watergate blowout election, for example, looked like a GOP death blow at the time, but merely postponed the party’s triumph for a few years. It’s a lesson the teabaggers of 2010 are now chewing over in the wake of 2012—although I don’t expect them to digest it any time soon.
Looking for Hope Without the Proles
But while the Democratic tide seems to be rising again, we’ve no way of knowing if it will turn into a progressive flood that eventually washes away the remnants of the Reagan coalition and changes the “center right” party that Obama inherited into a center left (or just plain left) party that a Democratic Reagan might lead.
Certainly, there are plenty of reasons why it won’t, and can’t. One is the enduring power of the corporatist wing of the party, and the corresponding weakness of organized labor: the traditional powerhouse of progressive economic policy.
God knows I don’t want to be accused of being an optimist here—I have a reputation to protect. It’s laughably easy to imagine a majority party that staunchly backs a woman’s right to choose, supports gay marriage, appeals to African Americans and Hispanics with a carefully selected set of valence issues—and quietly cooperates in dismantling the social safety net under the guise of “reforming” it.
It’s easy to imagine such a party because we already have one.
On the other hand, the economic trends don’t appear very friendly to such a party. Prospects seem dim for a growth resurgence that would take the edge off high unemployment, stagnant wages, and rising income inequality. At some point, being the pro-choice, pro-immigrant, pro-racial sensitivity party might not be good enough.
If there is hope, Orwell said, it lies with the proles. I’m not that naïve. (In any case, in a globalized economy, the old industrial proletariat mostly lives in China, and doesn’t get to vote, here or there.) But I can think of two specific reasons to think a more progressive Democratic Party might be possible to build:
The Democrats (or at least the Obama machine) has re-learned how to organize, and taken it high tech. If those tools could be applied successfully at the local level, and/or by allied forces (unions, nonprofits, etc.) maybe the party itself could develop the grassroots muscle the unions once provided—which in turn could be used to advance an economic agenda, not just win elections.
The white-collar professionals and paraprofessionals who have defected to the Democrats on cultural grounds are also now in danger of being proletarianized. Technology is rendering their skills (e.g. medical diagnostics, legal research, engineering design, etc.) obsolete. This might make them amenable to a more progressive economic approach.
Don’t Mourn, Organize
We all know the obstacles: A sluggish, corporate-controlled media that likes the status quo just fine, thanks; billionaire donors with money to burn (almost literally, in Sheldon Adelson’s case); a largely de-unionized white working class that clings to the GOP even more tenaciously than it does to its guns and religion.
Modern technology notwithstanding, there are no magic wands, just updated versions of the same old democratic (small d) tools: organize, agitate, contribute, vote. But it might not hurt to remember that the original progressives, the people who built the unions and fought for the New Deal, did what they did with those same tools.
In any case, we have to try. Sooner or later, the Republicans are going to learn how to adapt, and will find their own tools for chipping away at the Democratic coalition. The GOP won’t always be held hostage by the teabaggers.
Or, even worse, an economic crisis—one that corporate Democrats have no answers for—will drive otherwise sane and rational voters into the arms of the teabaggers, fueling a resurgence of angry right-wing populism. And if you’re not worried about where that could lead, you haven’t been paying attention.
Either way, if progressives just bitch about Obama, instead of trying to shape his Democratic Party to their own ends, we may wind up looking back on his presidency, and his crummy budget deals, as the progressive Golden Age—or as close as we ever came to one. |
March 30, 2018 2017-18 season, Dwyane Wade, Erik Spoelstra, Goran Dragic, Tom D'Angelo.
MIAMI – With all the talk about the Heat picking up the pace since the All-Star break, don’t forget about another aspect of Miami’s game, one that surely pleases coach Erik Spoelstra more than anything.
The Heat are getting defensive.
Miami limited the Bulls to 36.7 percent from the floor during Thursday’s 103-92 victory, the second consecutive game the Heat have held an opponent below 40 percent – Cleveland shot 36.5 percent Tuesday – and the 15th time this season, tying Boston, Philadelphia, Portland and Utah for the stingiest defensive efforts.
The Heat have held four of the top six teams to under 100 points and 40 percent shooting in a game. They have done it once against Golden State, Boston and Toronto and twice against Cleveland. Houston and Portland are the exceptions.
Spoelstra, whose teams held opponents under 40 percent shooting 12 times last season, was impressed with Thursday’s effort coming just two days after an emotional victory over the Cavaliers, a game played with a playoff feel. This despite the Bulls owning the league’s worst field goal percentage of .435.
The Heat (41-35) are eighth in the league with a defensive rating of 104.1 points. That number has improved since the All-Star break with a 103.2 rating the last 18 games.
Heat opponents shot .452 from the field in the 58 games before the break and are shooting .448 since. And that pre-All-Star break number includes a season-high streak of three consecutive games in which Miami limited Charlotte, Dallas and Cleveland to field goal percentages lower than 40 percent.
Miami (41-35) would appear to have a good chance to extend its current streak to three games Saturday when it faces Brooklyn (24-51), the second-worst shooting team in the league at .439.
The problem: The Nets are shooting .474 (108 of 228) against the Heat this season while winning two of the three games.
The Heat have shot less than 40 percent nine times this season but have managed to win four of those games.
For Miami, everything now is geared toward the playoffs, which start in two weeks. And being at the top of its game defensively would be a great help. |
import {
SponsorsResponseNode,
Sponsor
} from './types'
import subWeeks from 'date-fns/subWeeks'
import subMonths from 'date-fns/subMonths'
/** Resolve a sponsor's profile link: their own website, falling back to their Open Collective page. */
function getProfileURL (node: SponsorsResponseNode) {
  const { website, slug } = node.fromAccount
  return website != null ? website : `https://opencollective.com/${slug}`
}
/**
 * A sponsorship counts as active when its status is ACTIVE and it was last
 * updated within the past month and a half (one month plus two weeks).
 */
export function isActive(node: SponsorsResponseNode) {
  const cutoff = subWeeks(subMonths(Date.now(), 1), 2).getTime()
  return node.status === 'ACTIVE' && new Date(node.updatedAt).getTime() > cutoff
}
/** Silver tier: active, and either on the silver-sponsors tier or contributing $100 or more. */
export function isSilver(node: SponsorsResponseNode) {
  if (!isActive(node)) return false
  return node.tier?.slug === 'silver-sponsors' || node.amount.value >= 100
}
/** Bronze tier: active, and either on the bronze-sponsors tier or contributing $50-$99. */
export function isBronze(node: SponsorsResponseNode) {
  if (!isActive(node)) return false
  const amount = node.amount.value
  return node.tier?.slug === 'bronze-sponsors' || (amount >= 50 && amount < 100)
}
/**
 * Backer: an active sponsorship of $10+ that is neither silver nor bronze and
 * was created before May 5, 2020 (month index 4 = May).
 */
export function isBacker(node: SponsorsResponseNode) {
  const creationCutoff = new Date(2020, 4, 5).getTime()
  return (
    isActive(node) &&
    !isSilver(node) &&
    !isBronze(node) &&
    node.amount.value >= 10 &&
    new Date(node.createdAt).getTime() < creationCutoff
  )
}
export function sponsorsMapFn (node: SponsorsResponseNode): Sponsor {
return {
id: node.fromAccount.slug,
url: getProfileURL(node),
imageUrl: node.fromAccount.imageUrl,
name: node.fromAccount.name,
}
} |
Remix OS is a modified version of Google Android designed to make Google’s mobile operating system feel like a desktop OS. It has a taskbar, a desktop, and support for running apps in windows that can be moved or resized. Remix OS developer Jide launched a version of the operating system that can be installed on PCs this year, but in 2015 the company also launched two of its own hardware devices: the Remix Ultra Tablet and the Remix Mini desktop.
While Remix OS for PC does not include the Google Play Store and other Google apps, the version of the software that came pre-loaded on Jide’s own hardware did… until now.
Remix OS 2.0.307 for the Remix Mini desktop is now available. Among other things, it removes support for Google Mobile Services.
You can still sideload Google’s mobile framework yourself if you want to be able to use the Google Play Store. And Jide says users can opt to skip the software update if they want to continue using an older version of Remix OS with Google services intact… but they won’t be able to install any future updates or bug fixes if they do that.
When I asked why Google Mobile Services had been removed, I received this response:
The reason this update has removed GMS, is to ensure that Google can deliver a consistent experience across all Android devices for all apps. We are committed to bring users the ultimate Android experience, and will continue to dedicate our resources to do this. Remix Mini puts Android on varying screen sizes and usage situations and Google needs to verify each of them. We are working with Google to streamline the process.
I still find the move a bit baffling, since I know Jide had actually been talking to Google about including the Play Store, Gmail, YouTube, and other apps on more versions of Remix OS, not fewer. Specifically, the company had been hoping to be able to pre-install the Google framework on Remix OS for PC, making it possible to run the Play Store on any laptop, desktop, or other computer running Remix OS… although Google had yet to approve that plan.
Update: Jide says tablets and other devices with more traditional form factors for Android continue to meet Google’s compliance tests, which means the Play Store will continue to be available on the Ultra Tablet and similar devices. But Google has apparently noticed some apps do not work as expected on desktop-style devices like the Remix Mini, so the company has asked Jide to remove Google Mobile Services for now.
It’s possible that GMS could return in the future if Google and/or Jide make changes needed to ensure that all apps work as expected.
While the Google Play Store is absent from the latest version of Remix OS for the Mini, there is a new Remix Central utility, which seems to be a sort of third-party app store. You can also continue to download apps from other stores or websites such as APK Mirror or the Amazon Appstore.
thanks Mitchell! |
Pedro Saúl Pérez
Pedro Saúl Pérez (c. 1953 – October 1, 2007) was a Dominican Republic advocate for the rights of Dominican Republic immigrants and migrants in Puerto Rico. Pérez was the founder and president of the Dominican Committee for Human Rights of Puerto Rico.
A legal resident of Puerto Rico for almost 40 years, Pedro Saúl Pérez earned a living as a taxi driver, but spent his remaining time speaking out for the rights of Dominicans living in Puerto Rico. Many Dominican residents of the island complain about persistent discrimination from the Puerto Rican majority. One of Pérez's greatest legal victories was his ability to convince Dominican authorities to investigate the death of a Dominican illegal immigrant who died during a Puerto Rican police raid in the 1990s. Pérez was also a leader in opposing a proposed tax on remittances sent to the Dominican Republic by Dominican residents on the island. The proposal was ultimately defeated.
Pedro Saúl Pérez was 54 years old when he died of an apparent heart attack while walking on a street in Puerto Rico's capital city, San Juan. His remains were repatriated for burial, accompanied by his wife and son, in the Dominican Republic. |
<filename>_clones/data-structures-and-algorithms-in-java-6th-edition-goodrich-tamassia/src/main/java/dsaj/design/Student.java<gh_stars>0
/*
* Copyright 2014, <NAME>, <NAME>, <NAME>
*
* Developed for use with the book:
*
* Data Structures and Algorithms in Java, Sixth Edition
* <NAME>, <NAME>, and <NAME>
* <NAME> & Sons, 2014
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package dsaj.design;
/** A basic {@code Person} implementation describing a student with an ID, name, and age. */
public class Student implements Person {
  String id;
  String name;
  int age;

  /** Creates a student with the given ID, name, and age. */
  public Student(String studentId, String studentName, int studentAge) {
    id = studentId;
    name = studentName;
    age = studentAge;
  }

  /** A rough estimate of study hours: half the student's age. */
  protected int studyHours() {
    return age / 2;
  }

  /** Returns the student's ID. */
  public String getID() {
    return id;
  }

  /** Returns the student's name (from the Person interface). */
  public String getName() {
    return name;
  }

  /** Returns the student's age (from the Person interface). */
  public int getAge() {
    return age;
  }

  /** Two persons are equal here only when both are students with the same ID. */
  public boolean equals(Person other) {
    if (!(other instanceof Student)) {
      return false;                       // a non-Student can never be equal
    }
    Student that = (Student) other;       // explicit cast is now safe
    return id.equals(that.id);
  }

  /** Human-readable form, e.g. {@code Student(ID:..., Name:..., Age:...)}. */
  public String toString() {
    StringBuilder sb = new StringBuilder("Student(ID:");
    sb.append(id).append(", Name:").append(name).append(", Age:").append(age).append(")");
    return sb.toString();
  }
}
|
Trypanosoma cruzi culture used as vaccine to prevent chronic Chagas' disease in mice The development of chronic pathology in mice at 2 to 10 months after inoculation of 10 T. cruzi trypomastigotes can be prevented by preimmunization with live, attenuated culture parasites (strain TCC). Swiss mice received one or three immunizing inoculations of 10 TCC organisms and were challenged with 10 Tulahun blood trypomastigotes. Control groups received only the immunizing or the challenge inoculations. Immunized groups as compared with nonimmunized controls had lower mortality rates at 2 months postchallenge (9% versus 23%; P = 0.059), lower early peaks of parasitemia, lower percentages of positive xenodiagnoses at 5.5 months (40 versus 80%; P = 0.061), and lower incidences of tissue lesions in the skeletal muscle (P less than 0.005) at 2, 6, and 10 months postchallenge. Tissue lesions in the heart and smooth muscle were also reduced, reaching statistical significance after 10 months (P less than 0.02). Chronic pathology parameters were never enhanced in preimmunized groups. In spite of the putative role that autoimmunity may play in the development of chronic chagasic lesions, the preventive effect of vaccination is readily exerted upon the chronic murine model of Chagas' disease. |
<gh_stars>1-10
package graphqlbackend
// LSIFDocumentationPageArgs holds the GraphQL arguments for resolving a
// single LSIF documentation page, addressed by its path ID.
type LSIFDocumentationPageArgs struct {
	// PathID identifies the documentation page to resolve.
	PathID string
}
// DocumentationPageResolver resolves a documentation page's content tree.
type DocumentationPageResolver interface {
	// Tree returns the page's documentation hierarchy as a raw JSON value.
	Tree() JSONValue
}
|
After years in planning, construction on the Silverton windfarm in western New South Wales is finally set to begin after the sale of the project from AGL to its Powering Australian Renewables Fund (PARF).
The deal will see AGL pay just $65 a megawatt hour for the first five years of the windfarm’s operation, effectively undercutting current prices for coal-generated electricity.
“It’s a very low price, which demonstrates the amazing innovation and cost curve that renewable energy is on,” says Alicia Webb, director of large-scale energy at the Clean Energy Council, the clean energy industry’s peak body.
A joint venture between GE and the civil engineering construction company Catcon will see 58 GE wind turbines installed in the Barrier Ranges, 5km north of the town of Silverton and 25km north-west of Broken Hill. The 3.4MW turbines will be the largest in Australia, with rotor blades spanning 130 metres sitting atop 110-metre towers.
“The larger wind turbine rotors provide an increased area to better capture the lower wind speeds and generate more energy,” says Adam Mackett, AGL Silverton’s wind farm project manager.
The 200MW capacity windfarm – the sixth largest in Australia – will cost $450m to build, generate about 150 local jobs during construction and should be fully operational by mid-2018.
With a capacity factor of 44.5% – at the high end for onshore windfarms – and an energy conversion guarantee built into the GE-CATCON contract, it is expected to generate 780,000MW hours of electricity a year. That’s enough to power 137,000 Australian homes and is equivalent to taking 192,000 cars off the road.
The guarantee “is a key commercial feature of the contract that was well received by the financiers”, says Mackett. Extensive wind turbine data will be used to track performance against the guarantee.
Renewable energy produced by the Silverton windfarm will be fed into the national energy market (NEM), which supplies electricity to Queensland, NSW, Victoria, Tasmania and South Australia. Less than 13% of the NEM’s electricity is generated from renewables now.
The Silverton windfarm – acquired by PARF for $36m – is the first project to be built from scratch by the fund. AGL established PARF in early 2016 as a way to attract investment for renewable energy projects that will help it meet its obligations under Australia’s renewable energy target.
PARF acquired AGL’s 102MW Nyngan and 53MW Broken Hill solar plants as seed assets in November 2016 and will probably acquire AGL’s proposed 350MW windfarm at Coopers Gap, 180km north-west of Brisbane.
“The momentum we’re experiencing with PARF is pleasing and proves that investor support exists for large-scale renewables development. However, further comprehensive policy changes are required to facilitate Australia’s transition to a low-carbon economy,” says AGL’s managing director and chief executive, Andy Vesey, in a statement.
The fund’s goal is to invest $2bn to 3bn in 1,000MW of large-scale renewable energy projects. As well as AGL’s contribution of $200m, the investment fund QIC is providing $800m to PARF on behalf of the Future Fund and clients investing in QIC’s global infrastructure fund. The balance will comprise debt raised on a project-to-project basis.
Silverton will contribute about 2.4% to Australia’s renewable energy target. The target is to generate 33,000 gigawatt hours of electricity a year from large-scale renewable energy projects by 2020, enough electricity to power about five million houses and meet about 23.5% of Australia’s electricity needs.
In May 2016, the clean energy regulator reported that Australia needed to build 6,000MW of renewable energy capacity – in addition to the 13,652MW already in the system – to meet the target, which was cut from 41,000 gigawatt hours in 2015 after a 15-month review.
Webb says the lengthy review and the short policy time frame for the target have made attracting investment for renewable energy projects difficult.
“[PARF is] stepping in to bridge the gap in an environment where there’s not a lot of long term certainty,” she says.
Nonetheless, she is confident the 2020 target can be met, thanks to technological advances in the industry. Whereas typical wind turbines a decade ago generated just 1.5MW a turbine, newer models generate upwards of 3MW. “The target is continually getting easier to meet as time goes on, even though our deadline is getting closer,” she says.
State-based renewable energy targets and the lowering cost of renewable energy are also encouraging signs that the 2020 target will be met, says Webb.
However, she argues that an updated policy taking into account greenhouse gas emissions targets is needed. “We’ve been saying for a long time now that Australia badly needs some sort of aligned energy and emissions policy that goes beyond 2020 and we await its announcement,” she says.
Australia’s commitment to the Paris climate agreement is to reduce emissions to 26–28% of 2005 levels by 2030. But greenhouse gas emissions data released by the government in late 2016 forecast that Australia will miss that target. |
def _setup_screen(self, config):
    """Build this retina's 2-D image source and screen-to-image interpolator.

    Instantiates the image object selected by ``self._input_type``, maps the
    screen grid into image coordinates, and prepares an ``ImageTransform``
    that samples the image over the projected grid's bounding box.
    """
    # Image generator chosen by the configured input type.
    self._image2d = image2Dfactory(self._input_type)(
        config, self._dt, self.retina_index)
    # Project the screen grid into image space.
    img_x, img_y = self.screen_to_image_map.map(self.grid[0], self.grid[1])
    # Bounding box of the projected grid in image coordinates.
    x_min, x_max = img_x.min(), img_x.max()
    y_min, y_max = img_y.min(), img_y.max()
    self._interpolator = ImageTransform(
        self._image2d.get_grid(x_min, x_max, y_min, y_max), [img_x, img_y])
def preset_value_shift(sp1, sp2, preset_shift):
    """Shift the second spectrum's x-axis by a preset amount.

    Builds a new Spectrum from ``sp1``'s data unchanged and a new Spectrum
    from ``sp2`` with ``preset_shift`` subtracted from its x values, and
    returns both together with the shift that was applied.
    """
    reference = Spectrum(sp1.x, sp1.y)
    shifted = Spectrum(sp2.x - preset_shift, sp2.y)
    return reference, shifted, preset_shift
<reponame>luoei/LSMSForwarderAndroid<gh_stars>1-10
package dev.luoei.app.tool.usb.share;
import android.os.Environment;
import android.os.Handler;
import android.os.HandlerThread;
import com.orhanobut.logger.AndroidLogAdapter;
import com.orhanobut.logger.CsvFormatStrategy;
import com.orhanobut.logger.DiskLogAdapter;
import com.orhanobut.logger.FormatStrategy;
import com.orhanobut.logger.Logger;
import java.io.File;
/**
 * Configures the application-wide Logger with two adapters: one writing
 * CSV log files to external storage and one mirroring output to logcat.
 */
public class LogService {

    // Rotate disk log files at 500 KB (roughly 4000 lines per file).
    private static final int MAX_BYTES = 500 * 1024;

    /**
     * Installs the disk and logcat log adapters. Log files are written
     * under {@code <external-storage>/logger/usbshare}.
     */
    public static void initLogger() {
        String root = Environment.getExternalStorageDirectory().getAbsolutePath();
        String logDir = root + File.separatorChar + "logger/usbshare";

        // Background thread that owns all disk writes; started before its
        // looper is handed to the write handler.
        HandlerThread writerThread = new HandlerThread("AndroidFileLogger." + logDir);
        writerThread.start();

        Handler writeHandler =
                new DiskLogStrategy.WriteHandler(writerThread.getLooper(), logDir, MAX_BYTES);
        FormatStrategy csvStrategy = CsvFormatStrategy.newBuilder()
                .tag("USB")
                .logStrategy(new DiskLogStrategy(writeHandler))
                .build();

        Logger.addLogAdapter(new DiskLogAdapter(csvStrategy));
        Logger.addLogAdapter(new AndroidLogAdapter());
    }
}
|
Due to high demand for its return to Alaska this upcoming May, Cunard has announced it will offer a full season of Alaska sailings from June through September in 2020. Doubling its programming from 2019, the season will include ten roundtrip voyages aboard the Queen Elizabeth.
Generally sailing out of Vancouver and traveling along the coasts of British Columbia and Alaska, Cunard’s summer 2020 voyages will last between nine and 12 nights. When it’s not tracing the coast, the Queen Elizabeth will sail the narrow waterways of the Inside Passage, the Hubbard Glacier and Sawyer Glacier’s twin fjords, Tracy Arm and Endicott Arm. Cunard’s Alaska cruises will include longer time in certain ports, including Skagway, Juneau, Ketchikan, Sitka and more.
Cunard’s summer 2020 season will also offer a few other itineraries. For the Fourth of July, the Queen Elizabeth will offer a three-night cruise to San Francisco. Returning to Vancouver, the ship will sail a 16-night cruise from July 5-21. Bookending the season, the Queen Elizabeth will offer north Pacific crossings from Yokohama and to Tokyo. This final trip will leave Vancouver on August 29, last 29 nights and arrive in Tokyo on September 28. Passengers can choose to combine any of these or the Alaska voyages however they want.
The Queen Elizabeth just wrapped up a refit this past November. The renovations included the launch of the new Mareel Wellness & Beauty spa, as well as updated public spaces, bars, lounges and rooms. |
O N THE EVENING of May 7, 2007, 48-year-old Lata Duka was doing dishes in the kitchen of her home in Cherry Hill, New Jersey, when she heard a loud bang come from the front of the house. “It wasn’t a normal sound. I was very scared,” Lata recalls nearly a decade later.
Thinking someone was breaking in, Lata grabbed a chair from the kitchen table and hoisted it above her head, waiting for the intruder. Moments later a swarm of armed men burst through the front door and ran into her kitchen. “Put the chair down or I’ll shoot!” she says one exclaimed, pushing his gun against her chest.
The armed men were FBI agents and other law enforcement officials. As they searched the house, one of the men approached Lata. He was smiling.
“He kept asking me, where are my sons!” Lata remembers. “Just smiling and going up and down the stairs, asking me all the time, where are your sons? I told him my sons were at work. He just kept smiling at me.”
Lata didn’t know that at roughly the same time, authorities were conducting raids at separate locations in Cherry Hill to arrest her three sons, Dritan, Shain and Eljvir Duka. Over 100 officers and agents were involved in what at the time was one of the most high-profile counterterrorism arrests in the post-9/11 era.
The next morning, Chris Christie, then the U.S. attorney for New Jersey, appeared at a press conference flanked by law enforcement officials to announce the arrests. “The philosophy that supports and encourages jihad around the world against Americans came to live here in New Jersey and threatened the lives of our citizens through these defendants,” he said.
Christie said that five men apprehended the previous night — the three Duka brothers along with two friends, Mohamad Shnewer and Serdar Tatar — had been planning to launch a terrorist attack against the nearby Fort Dix military base. “Fortunately, law enforcement in New Jersey was here to stop them,” he said.
The press conference and ensuing case garnered national attention, and the brothers and their friends quickly became known as the “Fort Dix Five,” characterized in the media as a terrorist cell that intended to kill servicemen and attack facilities at the base. For Christie, now a possible contender for the GOP 2016 presidential nomination, the arrests would be a career turning point, helping galvanize his eventual rise to governor of New Jersey.
Entrapped by Razan Ghalayini
For the Duka family, the arrests marked a tragic turn. They had escaped the turmoil of the former Yugoslavia and managed to start anew in the United States, only to find three sons publicly branded as terrorists. Dritan, Shain and Eljvir, seized when they were 28, 26 and 23, would be convicted of conspiring to kill U.S. military personnel and sentenced to life in prison, devastating the Duka family and putting an end to their nascent American dream.
Beyond the sensational headlines is the story of paid FBI informants with long criminal histories who spent a year working to befriend the brothers and enlist them as terrorists. This effort, both expensive and time-consuming, nevertheless failed to convince the Duka brothers to take part in a violent attack. Indeed, over the course of hundreds of hours of surveillance, the plot against Fort Dix was never even raised with them.
In the years since these events occurred, the use of dubious informants in terrorism investigations by the FBI has become almost routine. When purported terror plots are “revealed,” they almost invariably involve paid government informants at every level of their ideation, facilitation and planning. But the story of the Duka brothers is an early example of this type of case — and it still stands out because of the deliberate and brazen way the brothers were entrapped by authorities, assisted by their paid informants. Indeed, one might argue that the targeting of the Dukas was the prototype for the program of state-orchestrated terrorism plots that continues today.
I N THE 1980s , Yugoslavia was in its final chaotic decade of existence. Lata Duka and her husband, Firik, both ethnic Albanian Muslims, decided to leave their small village of Spas in search of a better life for their three young boys.
The Dukas traveled by train across Europe to a refugee camp in Latina, Italy, where they stayed for a year. From there, they boarded a plane to Mexico City and made their way to the Rio Grande, which they crossed by canoe into Texas. Once across the border, the family spent 12 hours in the back of a pickup truck to Dallas, before finally heading east toward their final destination: the Bensonhurst neighborhood of Brooklyn, New York.
None of the Dukas spoke English at the time, and they had entered the country without legal documents. Firik found a job stocking shelves at a Korean-owned fruit stand, where he was paid $175 a week. He made flashcards to learn the names of the produce he was handling, and at night, he would come home and teach his wife the words he had learned. “Our way of life was to just take care of our families, just live simply, and teach the children how to work hard,” Firik says.
Life in Brooklyn wasn’t easy, and the Duka family was only getting bigger. Lata and Firik had two more children: a girl named Naze and a boy named Burim. When their oldest child, Dritan, or Tony as he’d come to be called, turned six, they sent him to public school. Because he could barely speak English, he fell behind the other kids. When Lata got notes from his teachers, she couldn’t read them.
Bensonhurst was known, in Brooklyn and beyond, as a home for ethnic mafias. “Growing up, the Russians would be with the Russians, Italians with the Italians, and the Albanians with the Albanians,” remembers Burim, the youngest of the four brothers. “The Albanians never started nothing, but sometimes, if someone came to us, we had to fight.” It wasn’t unusual for the boys to come home with a black eye or a bleeding lip. In time, they adapted to the street life of their neighborhood, developing thick Brooklyn accents and a swagger to match.
Tony, who had a temper, frequently got into fights at school. He knew he was heading down a bad path and dropped out during his freshman year, telling his father, “If I don’t, I’m either going to end up in jail or dead.” Reluctantly, Firik got his son a job at a wholesale food distributor, where he was driving delivery trucks.
Though he stopped attending classes, Tony continued to pick up his brother Shain from high school, where he eventually met a student named Jennifer Marino. The two fell in love, began dating, and a year later were engaged. Jennifer moved into the Duka family’s small apartment.
Like their older brother, Shain and Eljvir also dropped out of school to work, and spent more time hanging out on the streets. At various points, the three brothers were arrested on charges of disorderly conduct and marijuana possession.
Firik and Lata grew increasingly frustrated; they hadn’t moved their family halfway across the world to have them give up their education and get caught up in petty crime. They were at a loss for what to do, and overwhelmed by the challenges of life as immigrants in America. In an effort to keep their sons out of trouble, Firik moved the family out of Brooklyn to a two-bedroom apartment in suburban Cherry Hill, New Jersey. Tony, Jennifer and their newborn baby girl, Lejla, took one room, while Firik and Lata took the other. Shain, Eljvir, Naze and Burim all slept in the living room.
One day after leaving work, Shain and his girlfriend got into a car accident. While their injuries were minor, the experience shook Shain. “I realized that if I had died then I would have gone to hell,” Shain says of the experience, writing to The Intercept from a federal prison in Kentucky, where he’s currently incarcerated. “The accident made me realize that death can come at any moment so I better try and get right.”
Over the course of the next year, Shain began to take his Muslim identity more seriously. He stopped drinking and smoking pot, and says these changes in behavior opened up conversations about religion among the brothers. “I started to read the Quran a bit, and pray every now and then. It was a struggle because I didn’t want to be fake,” Shain says. “When I do something, I don’t want to be hypocritical. Over here praying and fasting, then over here in a nightclub smoking weed with a bunch of girls partying. No, I would try and do it wholeheartedly.”
Lata and Firik, both practicing Muslims, were overjoyed by this change. “I had tears in my eyes when they were telling me they would start praying,” says Lata. As the tumult of their early years passed, the brothers began to settle into lives revolving around family and work, pooling their money to open a restaurant, which they named Dukas Pizza. They also became more religious. Their understanding of Islam was elementary and largely self-taught, and for the first time, they began attending mosque services on Fridays, praying five times a day and growing out their beards. They incorporated Islamic phrases into their everyday lives, greeting each other with “Salaam alaikum,” or “Peace be upon you.”
As the Dukas were changing, the United States was about to change, too. On September 11, 2001, hijacked planes crashed into the World Trade Center towers and the Pentagon. “When it happened, I was driving to a job in Jersey. My kids called me from home and told me something had happened,” Firik says. “I used to deliver food in those buildings, and I would take Shain along with me. When he was a child, the Twin Towers were his favorite buildings in the city. We couldn’t believe this was happening.”
In the aftermath of the attacks, the national mood turned. The Dukas, like many others, were opposed to the subsequent wars launched by the Bush administration in Iraq and Afghanistan. In their view, the U.S. was waging an unfounded attack on two countries that had nothing to do with 9/11. “I was frustrated and against the wars. I believed the wars were unjust and wrong,” Shain wrote from prison. “They killed so many innocent people.”
The Dukas also began to grow increasingly disenchanted with the widespread mistreatment of Muslims. In Europe, the 2004 Madrid train bombing, believed to be carried out by an al Qaeda-inspired terror cell, was followed the next year by a series of attacks in London. Public officials in Europe and the U.S. began to warn of the threat posed by young Muslim men. “America was turning into a spy state, it used 9/11 as a stepping stone to justify this,” Shain says. “Not everyone was affected, so not everyone cared, but Muslim people felt it.”
Yet the Duka family continued to thrive. Firik had started his own roofing business, which the brothers decided to focus on full time, selling their pizzeria. By the end of 2005, the company employed a growing staff and the future seemed bright. The boys decided to do something they had done many times before as a family: take a vacation.
In January 2006, the Duka brothers and a group of friends, including Mohamad Shnewer, Eljvir Duka’s former schoolmate and future brother-in-law, took a trip to a cabin in the Poconos Mountains in Northern Pennsylvania. There, they did what any group of young men might do on vacation: they went skiing, played paintball in the woods, rode horses at the stables and went to the shooting range.
Tony brought his video camera to record his brothers and friends. After the trip, Burim and Shain took the tape from Tony’s camera to a Circuit City near their home in Cherry Hill. They wanted to make copies of the video to give to everyone who went on the trip.
The Circuit City clerk processing the videotape saw a group of young bearded men in the woods, skiing, shooting guns and riding horses. The Dukas, whose daily speech was often punctuated with Arabic phrases, could occasionally be heard saying “Allahu Akbar” on and off camera. While in earlier years a group of young Muslim men at the shooting range may not have aroused the panic of employees, in the heightened paranoia after 9/11, it was enough to trigger alarm.
The employee called the police and reported the tape.
T HE FOOTAGE REVEALED no evidence of a crime, but the Circuit City employee’s call to the police set in motion a series of events that would soon link the Dukas and their friends to Mahmoud Omar, a 36-year-old Egyptian immigrant who was also an FBI confidential informant.
In the 1970s, when the Senate was investigating the FBI’s notorious COINTELPRO domestic counterintelligence operations, the agency employed around 1,500 confidential informants. Today, that number has ballooned to 15,000 confidential informants. Many of these individuals have long-documented criminal histories or problems with their immigration status, and their entanglement with the law is exploited to coax them into helping generate criminal cases against people who have yet to commit concrete acts.
In 2006, the FBI approached Omar, who also lived in Cherry Hill. He had moved to the U.S. in the 1990s and made a living exporting cars to Egypt; in some cases, they had been reported stolen. Convictions for fraud littered his record. “They showed me a photograph and asked me who it was in the picture,” he told The Intercept by phone. “The FBI don’t come and ask you if you know someone if they don’t already know the answer.”
The man in the photograph was Mohamad Shnewer, Eljvir Duka’s friend and future brother-in-law. Omar knew Shnewer in passing from shopping at the Shnewer family’s halal grocery store. The FBI told Omar they needed to know what Shnewer and his friends were up to and asked Omar to become an informant. He agreed.
Shnewer was a taxi driver in his early 20s whose sister was engaged to Eljivir. The Duka brothers, who describe Shnewer as immature, seemed to be his only friends. They were older and had the cachet of being tough guys from Brooklyn. Shnewer was always trying to impress them, Burim remembers. “One time, he told us that a passenger in his taxi refused to pay the fare, so he got out of the car and hit him across the head with a baseball bat,” he says.
Burim believes that story, like many of the others Shnewer would tell, was a lie.
Omar began coming to the grocery store with increasing frequency to befriend Shnewer. For Shnewer, the older man quickly became a mentor and a confidant. As their relationship developed, they began to discuss politics, religion and the ongoing wars in Iraq and Afghanistan.
While it’s unclear how the conversations began, it’s apparent from the FBI’s recordings with the informant that Shnewer was receptive to the idea of violence. Shnewer told Omar that that he spent time on the Internet watching graphic combat footage from Iraq.
The informant encouraged his new young protégé, suggesting that Shnewer move beyond listening and talking; it was time to “do something,” Omar said, and the two began floating ideas of what that “something” might be. In August 2006, Omar and Shnewer began discussing the idea of launching an armed attack against Fort Dix military base, close to Trenton, New Jersey.
But only Omar and Shnewer were formulating plans for an attack. In a conversation recorded on August 2, 2006, Omar pressed Shnewer to come up with other recruits for their plot. “You and I are not enough, and you had told me that maybe there could be other people,” Omar said. “Otherwise, we can’t do anything.”
“No, no, no when I tell you I have people, that means I have people,” Shnewer responded. “Listen I will not talk to anyone about matters like these unless I trust them.”
In the same conversation, Shnewer brought up Serdar Tatar, also a close friend of the Dukas, whose father owned a pizzeria near the Fort Dix base. Tatar dreamed of becoming a police officer, and Shnewer knew this, according to Burim and his parents. Nonetheless, Shnewer offered Tatar up as a possible co-conspirator, mentioning a map of Fort Dix he’d used to deliver pizza from his father’s shop to the base.
Mohamad Shnewer: You know Serdar? Who has the pizzeria close to here? Mahmoud Omar: So, what are your thoughts about him? Mohamad Shnewer: He is ready…. he has a map…. he used to deliver there. Mahmoud Omar: Ready to be killed? Mohamad Shnewer: Yes!
Two days later, Omar asked Shnewer again about possible conspirators for the attack.
“So who do you have in mind?” Omar asked.
Shnewer replied: “I have Tony, Eljvir and Shain in mind.”
I N U.S. CRIMINAL LAW , a conspiracy is an agreement between two or more persons to commit a crime at some time in the future. It is an agreement to break the law; it doesn’t have to be a plan. Once two individuals enter into an agreement, the crime is complete, though some statutes require evidence that concrete steps have been taken. But an individual cannot enter into a conspiracy with a government informant. So unless Shnewer could convince the others to join the plan to attack Fort Dix, there would be no criminal conspiracy.
Omar apparently felt more comfortable approaching Tatar than the Duka brothers and began courting the 23-year-old. He told him of the plot to attack Fort Dix and openly asked for his help: he needed the pizza delivery map.
Tatar, who had since left his father’s pizza shop and moved to Philadelphia, was working at a 7-Eleven when Sgt. Dean Dandridge of the Philadelphia Police Department came by for his daily coffee. On November 15, 2006, Tatar told Dandridge that he believed Omar might be planning a terrorist attack. Neither Tatar, nor Dandridge, had any way of knowing that Omar was an informant.
Dandridge left Tatar’s information with the FBI, expecting the bureau’s agents would be in touch soon. For three weeks, Tatar waited for the FBI to contact him. In the meantime, he recorded at least one conversation with Omar, so that when the authorities did reach out, he would have information to give them. Eventually and inexplicably, after repeated prodding, Tatar gave Omar the map of Fort Dix.
When a Philadelphia police detective assigned to the FBI’s Joint Terrorism Task Force spoke to Tatar, he downplayed the threat and refused the audio that Tatar had recorded. The agent asked Tatar if he had indeed given Omar the map. Suddenly scared, Tatar lied. That lie would later implicate him in the conspiracy.
Having succeeded in this haphazard way of ensnaring Tatar, Omar relentlessly tried to persuade Shnewer to set up a meeting with the Duka brothers to discuss “the plot.” But the meeting never seemed to materialize. Time and again, Shnewer found excuses to explain why this didn’t happen. For example, on September 14, 2006, Shnewer, after much hesitation, told Omar that Shain knew about the plot, but not of Omar’s involvement.
As months passed, Shnewer tried to assure an increasingly skeptical Omar that the Duka brothers were on board with the developing plans. When Shnewer failed to provide proof of their actual involvement, Omar pressed harder, asking Shnewer to pursue the brothers, and Eljvir Duka in particular. Between August 11 and September 19, 2006, Omar asked Shnewer about Eljvir 197 times.
Finally, after months of failed efforts, Omar told his FBI handlers that, in his estimation, Tony and Shain Duka knew nothing about the plot and seemed to be more focused on taking care of their families.
“I’m saying it again, those Dukas, they didn’t tell me nothing,” he said in a recent phone call with The Intercept. When asked how the FBI responded to his view of the Dukas, Omar replied: “They said it was none of my business. I just wear the wire and record.”
As Omar struggled to link the Duka brothers to the plot he’d developed with Shnewer, the FBI decided to introduce another informant into the case.
Besnik Bakalli, a 29-year-old undocumented immigrant from Albania, was sitting in a Philadelphia jail awaiting deportation when the FBI approached him about becoming an informant. Agents showed him pictures of the Duka brothers and told him to meet them at a Dunkin’ Donuts in Cherry Hill, where the Dukas often went after Friday prayers at the nearby Palmyra mosque.
When the Dukas walked into the Dunkin’ Donuts on a Friday in July 2006, Besnik was talking on the phone loudly in Albanian. The naturally gregarious Dukas overheard him and introduced themselves, ultimately befriending the informant. The FBI’s plan to quietly integrate their second informant into the lives of the Duka brothers was unfolding successfully.
Over the course of the next ten months, Bakalli saw the Duka family often. Over dinner with the brothers, Lata and Firik, he portrayed himself as a down-on-his-luck fellow Albanian, recently divorced and in dire emotional and financial straits. “He told us a former friend of his tried to rape his sister,” Shain says. “He got out of prison, heard the news, and got in an altercation, which killed this individual. After this, he said his life was in jeopardy. He came to America illegally and now is in a foreign land, alone and homesick. This was Besnik’s story to the family.”
The family took pity on Bakalli and took him in as one of their own. Firik Duka, whose roofing business continued to grow, hired him to work a few shifts at job sites around New Jersey and Philadelphia. Lata even tried to help Bakalli find a wife with whom to settle down.
Bakalli told the Dukas that he wanted to become a better Muslim, and the brothers agreed to help him. “This is when all the questions began to roll in,” Shain says. “What is jihad? Do we have to perform jihad? Me and my brothers did not take these questions as out of the ordinary. At that time all you heard on TV was jihad, terrorism, Islam this, Islam that. We thought he was just new and trying to understand, no red flags were raised!”
As they had both penetrated the same group of friends, Omar and Bakalli occasionally bumped into one another. Neither knew the other was an informant. “I hated the guy — didn’t like the look of him at all,” Omar told The Intercept.
The boys trusted Omar and Bakalli. Omar bonded with the Dukas over cars, a topic the brothers obsessed over. Surveillance transcripts reveal conversations with both informants that ranged from food to family to work.
World events, particularly those that affected Muslims, also came up. The men often discussed their opposition to U.S. involvement in the wars in Iraq and Afghanistan, then at their peak. They talked about the perceived targeting of Muslim-Americans by law enforcement and debated what role, if any, Muslims living in the U.S. had in assisting other Muslims resisting American aggression. They often couched their discussions of these topics in religious terms.
Shnewer and Omar spent much of their time together watching jihadi videos and listening to radical lectures on tape, often playing them in the Dukas’ presence. The Dukas also watched these videos, sometimes responding positively. Tony got particularly riled up by a lecture called Constants of the Path of Jihad by Anwar el-Awlaki, a Yemeni-American who would later be killed in a U.S. drone attack. He played the audio for his brothers and Bakalli, and in what would later be characterized as evidence of his radical beliefs, was recorded saying, “This is the real truth, straight up, no holds barred!”
Yet the brothers never talked about an actual plan to commit an act of terrorism. Discussing the various forms of jihad, Eljvir asserted, on questioning from Bakalli, that the daily struggle against personal vices like greed and lust is the greatest form of jihad.
In a conversation on September 22, 2006, Omar told Eljvir that he and Shnewer had been working on a “plan,” without providing specific details. Eljvir told them they should seek out a fatwa, or an Islamic legal opinion. While the prosecution would attempt to frame this comment as Eljvir seeking religious authorization for the Fort Dix plot, Omar undermined this claim at trial, conceding under cross-examination that Eljvir was unaware of plans pertaining to Fort Dix.
In other conversations, Bakalli continually pressed the Dukas to “do something,” and shamed them for not taking some kind of action to defend Muslims. During one heated conversation with Bakalli, Tony was recorded saying that he was “going to start something,” and that “you can do a lot of damage, man, seven people.” This statement would later be held as a damning self-indictment of the brothers’ intentions, but again, it never translated into real follow-up action or planning.
Despite their best efforts, Omar’s and Bakalli’s attempts to get the Dukas to put radical ideas into action didn’t gain traction. A month after Tony’s angry statements, Bakalli tried to get him to firm up plans to “do something.” At this point, Tony essentially recanted his incendiary words:
“We can’t … we … the biggest Jihad for us here in America is to spread Islam … That’s the most important thing. That is war, believe me. That is Jihad. Jihad is not just, like we say, to go fight. No people misunderstand it. … The first Jihad is with yourself, when the devil tells you, do this, you try, you fight with the devil. No, no, no. I won’t do it. Then the second Jihad is with your family. To work. To teach Islam to your children. Then you should spread Islam in, to tell others, this is Islam.”
Bakalli pressed, but Tony held firm. “Our biggest obligation for us is our family, especially for me with children,” he said.
In early 2007, the Dukas were joined by Bakalli, Shnewer and Omar on another “boys weekend” in the Poconos. The informants were promised horseback riding, hikes in the woods, “an epic game of paintball” and a shooting range. While playing paintball with Tony, Omar likened the game to military training. “This is like an army exactly,” he said, according to court testimony.
This second Poconos weekend, now infiltrated by two government informants, came and went without any discussion of a plot against military personnel. Instead, the brothers and their friends mostly spent hours watching videos of Eddie Murphy and Dave Chappelle stand-up comedy, in between horse riding and paint-balling.
At this point, roughly a year into the case, despite hundreds of hours of surveillance and the employment of two paid informants, the Dukas still had not been induced to commit any criminal act. The stakes were raised and an illegal gun deal was set up.
Firik Duka
THE DUKAS LOVED
guns; their Albanian heritage extolled firearms as a virtue of masculinity. “In Albania everybody has a gun in the house,” says Firik. “It’s normal for any man to have one there.”
Omar knew about the brothers’ enthusiasm, and he also knew that without proper immigration documents they couldn’t legally buy firearms in the U.S. It was a sore spot for the Duka brothers, all of whom had tried to apply or were in the process of applying for asylum status. In the Poconos, unlike other visitors who owned personal firearms, the Dukas had to wait in line for rentals at the shooting range.
In March 2007, Omar approached Tony with an offer: a friend in Baltimore with a gun shop was looking to make some under-the-counter sales of guns valued at the discounted price of $500 apiece. This offer was too good to pass up, and after being assured that this guy was “legit,” Tony agreed to take a look at what Omar’s friend had in stock.
The boys knew the transaction wouldn’t be legal. “Being an illegal alien did prevent us from purchasing our own guns legally,” Shain says. “At the time, me and my family were in the immigration process. We even hired a lawyer, and we were going to do papers properly when that was done. We always believed that these guns could be transferred legally to my name once we received our papers.”
In a separate conversation that same month, Omar spoke with Shnewer without the Dukas present.
Mahmoud Omar: By the way, I want to ask you a question, I want you to tell me seriously. Eljvir and Tony, do they know, for example why we’re getting the handguns or … ? Mohamad Shnewer: Yeah, of course. Mahmoud Omar: Don’t tell me you didn’t tell them, Mohamad. Mohamad Shnewer: Yeah, they know. Mahmoud Omar: That we, for example, are training in anticipation for something like this in the future? Mohamad Shnewer: Yeah!
On March 28, 2007, Omar provided Tony with a list of available weapons from his fictional Baltimore source. This list had in fact been created by the FBI. Inexplicably, in addition to AK-47s, handguns and M16 rifles, it also included heavy weapons like a rocket-propelled grenade launcher — used to destroy tanks and other armored vehicles — as well as an M-60 machine gun. Burim, who was 15 at the time, remembers Tony coming home and wondering how Omar’s guy could be “legit” if he was selling RPGs and M-60s, which are heavily regulated in the United States.
On April 6, 2007, Tony went back to Omar and told him that he was interested in the AK-47s, the M-16s and the handguns, but not the heavy weapons. In a recorded conversation, he expressed concerns:
Tony Duka: Is there something I need to know? Mahmoud Omar: Like what? Tony Duka: Who … that list, there was some stuff on that list that was heavy shit … the RPG …. Yeah, with rockets. That’s why if you know something I don’t know, ah, please tell me man.
Omar assured Tony that his friend in Baltimore was trustworthy.
On May 7, 2007, Tony and Shain met Omar at his apartment, which had been paid for that month by the FBI. As the brothers inspected the firearms they planned to purchase, audio recordings reveal Tony commenting, “Now we don’t have to wait in line to shoot in Poconos.”
Minutes later, police burst into the apartment and wrestled Tony, Shain and Omar to the floor. “I had no idea what was going on when it happened,” Shain wrote from prison. “I assumed we were being arrested because of the guns, which I knew we were buying from Mahmoud illegally.”
The men were put into police cars and eventually taken away to a Philadelphia detention center.
While Shain and Tony were being arrested at Omar’s apartment, Burim and Eljvir were driving home after taking Tony’s five kids to a Mister Softee for ice cream. As they pulled up to Tony’s apartment, they noticed police cruisers and SWAT vans surrounding the building. Burim got out of the car to ask an agent what was going on, and both he and his brother were handcuffed.
Eljvir was transferred to the same detention center as Shain and Tony. The teenage Burim was not arrested, but left handcuffed under a tree while officers searched Tony’s apartment. Burim recalls an armed agent telling him, “Don’t grow up to be like your brothers.” He later added, “You should think about finding yourself a new religion.”
Tony, Shain and Eljvir spent the night wondering how they were going to get out of what they assumed would be gun charges.
The next morning, the brothers, along with Tatar and Shnewer, who had been seized in separate raids, were driven in a black-tinted police van past throngs of reporters and cameramen to the federal courthouse in Camden, New Jersey.
Inside, they were presented with a criminal complaint accusing them of conspiracy to murder U.S. military personnel. “I was confused at first, but for the most part I breathed easy when I saw that,” Shain says. “I figured they mixed us up with someone else and we’d be out of here as soon as we cleared things up.”
As Shain remembers, the boys were taken to a holding cell and instructed to read through the complaint in its entirety. Shain read aloud to the group. The complaint consisted almost entirely of Mohamad Shnewer’s private conversations with Mahmoud Omar. “After reading it we all turned to Shnewer,” Shain says. “Is this really true!? You went to a military base, you said this and that!? Who the hell is Confidential Witness #1?! Mahmoud Omar was an informant? Unbelievable! We were all pissed at Shnewer.”
It became clear to the brothers that Shnewer, in his conversations with Omar, had committed them to taking part in a “plot” to attack Fort Dix without their knowledge.
The five men were charged with conspiracy to attack military personnel, as well as with weapons offenses for the guns they had attempted to purchase from Mahmoud Omar.
At a press conference announcing the indictments, U.S. Attorney Chris Christie praised law enforcement for stopping an impending threat, painting a dark portrait of the alleged plotters. “Believe me, too,” he said. “These people were ready for martyrdom. They spoke about martyrdom extensively in the tapes. They said they were to do this in the service of Allah.”
A blacked-out van leaves federal court in Camden, N.J., Monday, Dec. 22, 2008 carrying five Muslim immigrants who were convicted Monday of plotting to massacre U.S. soldiers at a New Jersey military installation. (Mel Evans/AP)
THE DUKAS WERE arrested in the spring of 2007, but not brought to court until the fall of 2008. In the interim, the brothers were held in pretrial solitary confinement at the Philadelphia Federal Detention Center. “The prison guards would ransack our cells and throw our Quran on the floor, but leave the rest of stuff alone,” Shain recalls. “We quickly realized that they were actually being serious about this.”
In opening arguments for the trial, presented in October 2008, the prosecutors’ case relied heavily on the two key informants. Omar was eventually paid $238,000 for his efforts, while Bakalli, who earned a minimum of $1,500 a week for his involvement, seems to have received additional benefits. He was facing deportation to Albania, where he had been involved in a shooting, and testified that in exchange for his cooperation with the FBI, he was allowed to remain in the U.S. The Albanian government also pardoned him.
Before proceedings commenced, New Jersey District Judge Robert B. Kugler granted a motion by prosecutors to keep the names of the jury anonymous, agreeing with the government that the trial represented an exceptional case requiring protection of the jurors’ identities.
At trial, Assistant U.S. Attorney William Fitzpatrick argued that the Duka brothers had been inspired by jihadist ideology. “Their motive was to defend Islam,” he told the jury. “Their inspiration was al Qaeda and Osama bin Laden. Their intent was to attack the U.S.”
The government set out to prove that between January 2006 and May 2007, each of the Duka brothers had entered into a conspiracy to murder members of the U.S. military. Prosecutors wouldn’t necessarily find a formal, written or oral agreement spelling out the details of the understanding. They just needed to demonstrate, based on the brothers’ “state of mind,” that the Dukas had knowingly and willfully entered into an agreement, and that at least one of the brothers had performed an overt act to further the agreement.
As was written in the jury instructions:
“Often the state of mind with which a person acts at any given time cannot be proved directly, because one cannot read another person’s mind and tell what he or she is thinking. However a defendant’s state of mind can be proved indirectly from the surrounding circumstances. Thus, to determine a particular defendant’s state of mind at a particular time, you may consider evidence about what the defendant said, what he did and failed to do, how he acted, and all the other facts and circumstances shown by the evidence that may prove what was in that defendant’s mind at that time.”
Since the Dukas were never recorded agreeing to take part in Shnewer’s and Omar’s plot to attack Fort Dix, the government had to prove they were still involved in other, more indirect ways.
For example, the court allowed into evidence the recording of Tony Duka saying he was “going to start something.” In future recordings, he seemed to repudiate this statement, saying, “the biggest Jihad for us here in America is to spread Islam.” But, as mere hearsay, the judge did not allow this statement or others to be presented to the jury unless the defendants were allowed to be cross-examined, meaning Tony would have had to give up his right not to testify. Even though the brothers wanted to take the stand, their lawyers urged them not to do so.
Prosecutors for previous U.S. terrorism cases have sought to establish participation in a conspiracy by displaying videos or websites found on a defendant’s computer that show frightening Islamist propaganda. Mahmoud Omar, during the time he spent with the Dukas’ co-defendant Mohamad Shnewer, asked Shnewer to download many of these videos, which the Dukas sometimes also watched. The prosecution played these videos to the court over the course of several days.
Shain described one juror’s reaction to a lengthy video pulled from Shnewer’s computer of U.S. soldiers being killed in battle by insurgent snipers: “Juror No. 3 got up from her seat before exiting for the break, gave us all a stare of death, turned around and slammed the binder of transcripts.” Juror No. 3, whose name remained concealed, would later tell the Philadelphia Inquirer that it was difficult for her to watch the video because her own son was a marine who had served two tours of duty in Iraq. “I thought I was seeing my son getting hit,” she told the paper.
The prosecutors claimed these videos, along with the Anwar al-Awlaki tapes, which the Duka brothers listened to in the presence of government informant Mahmoud Omar, served as inspiration and guidance for the Fort Dix operation.
To demonstrate this connection, the prosecution called Evan Kohlmann to the stand as an expert witness on Islamic terrorism and the use of digital media to promote terrorism. Kohlmann, who in 2014 was featured in a Human Rights Watch report on dubious terrorism prosecutions, testified that the defendants had been watching “some of al Qaeda’s best work,” and that their consumption of the videos suggested “a clear, considered, and present danger to the community.”
Yet Kohlmann’s analysis has come under considerable scrutiny in recent years. Fawaz Gerges, a professor of international relations at the London School of Economics, told New York magazine, which profiled the self-styled terrorism expert, that Kohlmann was in the “guilty verdict industry.” In an email to The Intercept, Gerges explained that prosecutors consider Kohlmann a “hired hand,” willing to say “whatever it takes” in front of a jury to help secure convictions.
During his testimony, Kohlmann portrayed the acquisition of guns from Mahmoud Omar, in addition to the heated statements the Dukas made about American foreign policy, as evidence of jihadist activity. As for the dearth of evidence substantiating an actual plot, Kohlmann told the jury, “It doesn’t take a lot of sophistication to kill people. Ultimately, it comes down to intent.”
On December 22, 2008, after six days of deliberation, the jury found the Duka brothers and their two friends guilty of conspiracy to kill members of the U.S. military at Fort Dix.
In determining sentences for federal crimes, judges take into account as a starting point the guidelines issued by the U.S. Sentencing Commission. The guidelines have “adjustments” that can be enacted at the judge’s discretion, which can fundamentally change the duration of a sentence. Among these, the terrorism adjustment has the most drastic effect of lengthening sentences.
The Dukas had been found guilty of one count of conspiracy to commit murder and three counts of illegal firearm possession. On those charges alone, they might have faced sentences of up to 24.5 years. But the prosecution requested that Judge Kugler apply the terrorism adjustment, which would dramatically increase that time.
On January 26, 2009, Judge Kugler received a handwritten letter from Mohamad Shnewer, who was awaiting sentencing in solitary confinement at the Philadelphia Federal Detention Center. In his letter, Shnewer described “boastful” discussions with the government informant and confessed to making “lies and allegations” about the Duka brothers’ knowledge of the Fort Dix plot. They were “clueless” about this plan, he wrote.
In April 2009, the Dukas, Tatar and Shnewer were brought before Judge Kugler for sentencing. Shain stood before the court and spoke out against the verdict. “A lot of money has been spent. Millions have been spent on this case. As if money has brought the truth of the matter,” Shain said. “We have stressed over and over again that they’ll lock you up for nothing, they’ll build a case on you. Today we have become victims of what we stressed so very often.”
Delivering Shain’s sentence, the culmination of a terrorism case that had lasted over two years, Judge Kugler said, “It’s not my place or desire at this time to review all the evidence … Suffice to say this defendant was in the middle of this plot. I’m realistic, I remember that they weren’t being taped 24 hours a day seven days a week.”
Brushing off the lack of direct evidence, Kugler added: “That there isn’t more explicit evidence does not concern me and obviously didn’t concern the jury either … I cannot deter this defendant, because of his belief system, from further crimes.”
Shain and Tony were sentenced to life in prison, plus 30 years. Eljvir, who was not convicted of the firearms offenses, received life in prison.
In a public statement made after the Dukas’ sentencing, acting U.S. Attorney Ralph Marra said, “The hatred and contempt these young men hold for America and the rule of law was made abundantly clear.” The life sentences were appropriate, he argued, to “protect the public from them and their deeply held, radical beliefs.”
IN THE YEARS since the convictions, the lives of the Dukas and Chris Christie, the U.S. attorney who prosecuted the brothers’ case, have taken vastly different trajectories. Christie won his race for governor, and is now a likely contender for the Republican presidential nomination.
Christie often cited the Duka case as a highlight of his career. In a 2012 speech to the American-Israel Public Affairs Committee (AIPAC), Christie recalled his success in the “uncovering of a plot to kill American servicemen and women,” telling a packed audience at the New York Hilton Hotel that he helped send to prison a group of “Muslim men practicing with semi-automatic weapons and screaming about jihad against the infidels.” Today, both the Republican Governors Association and the New Jersey Republican Party list the Fort Dix case as “one of Christie’s finest moments” under his biography.
Meanwhile, the Duka family is struggling. Tony’s five children are growing up without a father. Lata and Firik are faced with raising their grandchildren on their own. Burim, the youngest Duka brother, now 24, and the only one to escape entanglement in the case, dropped out of school to become the family’s primary breadwinner. The Dukas believe they have remained under surveillance. Firik says the FBI once came to the house and threatened to take Burim away. “We lost so much, and today we are barely surviving,” he says. “We live with broken hearts.”
While Shain is imprisoned at a high-security facility in Kentucky, Tony and Eljvir are being held at the infamous ADX Supermax prison in Florence, Colorado, which houses some of the nation’s most dangerous criminals and has famously been described as “a clean version of hell.” Tony and Eljvir have both spent portions of their sentences in solitary confinement, and Eljvir remains in isolation. Despite being locked up in the same prison for years, the two have never seen one another. Without the terrorism adjustment, they might have been released as middle-aged men. With it, they will likely die in prison.
Having exhausted all appeals, the brothers are filing a 2255, or writ of habeas corpus, which is a motion to set aside a sentence on the grounds that it was imposed in violation of federal law. Their appeal hinges on the argument of ineffective performance by their public defenders, but such appeals are rarely successful.
Far away from home, Shain, Tony and Eljvir’s periodic phone calls across the country are their only remaining link with their families. They say they find strength in God and knowledge of their innocence. Eljvir ends every call home with, “God willing, we will be reunited soon, not only in the next life, but this one too.”
Years later, the brothers still look back with incredulity at the events that led to their present situation. The needy friends exposed as government informants, the high-profile arrests and terrorism charges, and finally the life sentences that permanently altered the course of their lives. “We had plans for the future, we were expanding our business just weeks before, our families were growing,” Shain says. “Now, suddenly, we have been buried alive.”
More than seven years after the trial, the person who was arguably the most critical in securing the convictions still agonizes over his role in the case. In a recent interview with The Intercept, Mahmoud Omar, the informant, maintains that while Mohamad Shnewer was involved in the Fort Dix plot, the Dukas, whom he describes as “good people,” were innocent.
“I still don’t know why the Dukas are in jail,” he says.
Sheelagh McNeill contributed research to this report.
Photo Illustration: Connie Yu; Fort Dix: Mel Evans/AP |
Random number generation is an important aspect of many digital and electronic applications. For example, in the field of cryptography, random number generation is key to encryption algorithms. A random bit stream is a sequence of binary signals lacking discernible patterns or repetition over time.
In electrical circuitry, a random bit stream may be generated from a source that naturally exhibits random characteristics. For example, thermal noise in a CMOS field effect transistor (FET) channel injects a random component into the value of the current passing through that channel which can then be amplified to obtain a signal that is sufficiently random for a particular use. It is generally not possible to obtain a purely random signal by practical means, although it is possible in theory. For practical applications, what is sought is the ability to generate a signal that has a high degree of randomness, and thus a low degree of predictability, and which is suitable for use for the particular practical application.
However, generating a bit stream that has a sufficiently high degree of randomness based on a physical random phenomenon can be problematic. As is known in the art, the mere act of sampling the signal may interfere with the degree of randomness of the random physical phenomenon being measured. For example, in order to ensure a high degree of randomness, or unpredictability, the measurement circuitry should not introduce any bias into the probability that the measured value will be translated into a binary 0 or a binary 1. For example, if a sampling circuit measures a voltage level of noise at a given moment in time and compares it to a known threshold generated by the sampling circuitry, process and/or voltage and/or temperature variations may cause a drift in the threshold value over time, which may skew the sampling circuitry to translate more sampled values to one bit value or the other. Thus, the process is no longer truly random since there is no longer an equal chance of sampling a “1” or a “0”.
Current random bit stream generators inject a bias into the random bit stream generators that reduces the degree of unpredictability of the bit stream. A need exists for a method and an apparatus for generating a bit stream that has a sufficiently high degree of randomness (i.e., unpredictability) to be usable for a particular application. In particular, it would be desirable to generate a bit stream from a naturally occurring randomness source within the random bit generator circuitry itself. In addition, a need exists for preventing drift from randomness or unpredictability over time due to process and/or voltage and/or temperature variations.
Perceived Risk of Heroin in Relation to Other Drug Use in a Representative US Sample ABSTRACT Low perception of risk is a risk factor for heroin use. Research is needed to determine whether this risk factor for heroin use is affected by the use of other drugs. Data were analyzed from participants in the 2015/2016 National Surveys on Drug Use and Health who denied lifetime heroin use (N= 110,102). We examined how recency of use of various drugs and number of drugs used relate to perceptions that using heroin is not a great risk. Results from multivariable models suggest that no lifetime drug use, and recent prescription opioid misuse and methamphetamine use, in particular, were associated with higher odds of perceiving that heroin use is not of great risk. Recent marijuana use was associated with lower odds of reporting that heroin use is not of great risk. Use of more drugs in one's lifetime, past year, and/or past month tended to be associated with lower odds of reporting heroin use as not a great risk. Prevention experts should consider that recent prescription opioid misuse in particular is a risk factor for the lower perception of risk, while individuals reporting no lifetime drug use may also require better education regarding harms associated with heroin use.
<gh_stars>0
package main
import (
"flag"
"fmt"
"image"
"os"
_ "image/gif"
_ "image/jpeg"
_ "image/png"
)
// help is the usage template written to stderr; the single %s verb is
// filled with the program name (os.Args[0]).
const help = `imgstat - utility to know the size, width and height of an image.
Usage:
	%s [-hv] <path/to/image>
`
// img holds the metadata extracted from an image file.
type img struct {
	height  int   // image height in pixels
	width   int   // image width in pixels
	size    int64 // file size on disk, in bytes
	verbose bool  // when true, String renders a labelled multi-line report
}
// open reads the file at path and returns its on-disk size and pixel
// dimensions. Only the image header is decoded (image.DecodeConfig),
// so the pixel data is never loaded into memory. The returned img has
// verbose left at its zero value.
func open(path string) (*img, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		return nil, err
	}

	cfg, _, err := image.DecodeConfig(f)
	if err != nil {
		return nil, err
	}

	return &img{
		size:   info.Size(),
		width:  cfg.Width,
		height: cfg.Height,
	}, nil
}
// String formats the output the image data
func (i *img) String() string {
size := i.size
unit := "B"
if i.size >= 1000 {
size = i.size / 1000
unit = "KB"
}
output := "%d%s\t%dx%d"
if i.verbose {
output = "size: %d%s\nwidth: %dpx\nheight: %dpx"
}
return fmt.Sprintf(output, size, unit, i.width, i.height)
}
// main parses the command line, prints the usage text when -h is given
// or no image path is supplied, and otherwise prints the stats of the
// image at the first positional argument. Exits with status 1 on usage
// or decode errors.
func main() {
	h := flag.Bool("h", false, "Usage information")
	v := flag.Bool("v", false, "Verbose output")
	flag.Parse()

	if *h || flag.NArg() == 0 {
		// Usage goes to stderr so stdout stays clean for scripted use.
		fmt.Fprintf(os.Stderr, help, os.Args[0])
		os.Exit(1)
	}

	i, err := open(flag.Arg(0))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	i.verbose = *v
	fmt.Println(i)
}
|
package com.orange.tbk.admin.config;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.jsontype.impl.LaissezFaireSubTypeValidator;
import org.springframework.cache.annotation.CachingConfigurerSupport;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.RedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.Jackson2JsonRedisSerializer;
import org.springframework.data.redis.serializer.RedisSerializer;
import org.springframework.data.redis.serializer.StringRedisSerializer;
/**
 * Redis configuration exposing a {@link RedisTemplate} that serializes
 * keys and hash keys as plain strings and values and hash values as JSON.
 */
@Configuration
@EnableCaching
public class RedisConfig extends CachingConfigurerSupport {

    /**
     * Builds the shared RedisTemplate.
     *
     * @param connectionFactory the Redis connection factory supplied by Spring
     * @return a fully initialized RedisTemplate with string keys and JSON values
     */
    @Bean
    public RedisTemplate<String, Object> redisTemplate(RedisConnectionFactory connectionFactory) {
        RedisTemplate<String, Object> redisTemplate = new RedisTemplate<>();
        redisTemplate.setConnectionFactory(connectionFactory);

        // Serialize/deserialize values as JSON via Jackson (the default would
        // be JDK serialization, which is neither readable nor portable).
        Jackson2JsonRedisSerializer<Object> jsonSerializer = new Jackson2JsonRedisSerializer<>(Object.class);
        ObjectMapper objectMapper = new ObjectMapper();
        objectMapper.setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.ANY);
        // activateDefaultTyping replaces the deprecated enableDefaultTyping.
        // LaissezFaireSubTypeValidator keeps the previous permissive behavior;
        // NOTE(review): consider a stricter PolymorphicTypeValidator if cached
        // values can originate from untrusted sources.
        objectMapper.activateDefaultTyping(LaissezFaireSubTypeValidator.instance,
                ObjectMapper.DefaultTyping.NON_FINAL);
        jsonSerializer.setObjectMapper(objectMapper);

        // Serialize keys as plain strings so they remain readable in Redis.
        RedisSerializer<String> stringSerializer = new StringRedisSerializer();
        redisTemplate.setKeySerializer(stringSerializer);
        redisTemplate.setHashKeySerializer(stringSerializer);

        redisTemplate.setValueSerializer(jsonSerializer);
        redisTemplate.setHashValueSerializer(jsonSerializer);

        redisTemplate.afterPropertiesSet();
        return redisTemplate;
    }
}
|
Self-reported musculoskeletal problems amongst professional truck drivers Occupational driving has often been associated with a high prevalence of back pain. The factors that contribute to cause the pain are diverse and might include prolonged sitting, poor postures, exposure to whole-body vibration and other non-driving factors such as heavy lifting, poor diet or other psychosocial factors. In Europe, truck drivers are likely to be considered an at risk group according to the Physical Agents (Vibration) Directive and therefore risks will need to be reduced. This questionnaire-based study set out to examine the relationship between musculoskeletal problems and possible risk factors for HGV truck drivers to help prioritize action aimed at risk reduction. Truck drivers (n = 192) completed an occupational questionnaire with two measures of vibration exposure (weekly hours and distance driven). Items on manual handling, relevant ergonomics factors and musculoskeletal problems were also included. Reported exposures to vibration ranged from 12 to 85 h per week, with a mean of 43.8 h. Distances driven ranged from 256 to 6400 km (mean 2469 km). Most of the respondents (81%) reported some musculoskeletal pain during the previous 12 months and 60% reported low back pain. Contrary to expectations, vibration exposures were significantly lower among those who suffered musculoskeletal symptoms when distance was used as an exposure measure. Manual handling and subjective ratings of seat discomfort were associated with reported musculoskeletal problems. |
// Inited calls, in order, every callback stored in onInited and then every
// callback stored in afterInited, passing each one the given session.
// (Callbacks are registered elsewhere — the original comment mentions
// OnInited; presumably a matching registration helper exists for
// afterInited as well — confirm.)
func Inited(s *Session) {
for _, f := range onInited {
f(s)
}
for _, f := range afterInited {
f(s)
}
} |
PL3.5 Efficacy and safety of selinexor in recurrent glioblastoma New treatment modalities are needed for recurrent glioblastoma (rGBM). Selinexor is a novel, oral selective inhibitor of nuclear export which forces nuclear retention of tumor suppressor proteins including p53 and p27, leading to apoptosis. We previously reported interim results showing tolerability, preliminary efficacy, and blood-brain barrier penetration in a surgical cohort (N=8). We now report updated results following completion of accrual to non-surgical cohorts (N=68). This is an open-label, multicenter, phase 2 study of selinexor monotherapy. Patients (pts) not undergoing surgery for measurable rGBM per response assessment neuro-oncology criteria (RANO) were enrolled in one of 3 arms encompassing different dosing schedules of selinexor (50 mg/m2 BIW, 60 mg BIW, and 80 mg QW). Treatment was continuous, although cycles were defined as 28 days and response was assessed every other cycle by MRI. Prior treatment with radiotherapy and temozolomide was required and prior bevacizumab was exclusionary. The primary endpoint was 6-month progression free survival (6mPFS) rate, calculated by the Kaplan-Meier method. 76 pts were enrolled; 24, 14 and 30 pts on doses of ~85 mg BIW, 60 mg BIW, and 80 mg QW, respectively. Median age was 56 years (range 21–78). Median number of prior treatments was 2 (range 1–7). At the end of the 6 cycles, 30.2% pts on 80 mg QW were free from progression. The 6mPFS rate on 80 mg QW was 18.9%. Best RANO-defined responses (assessed locally) among 26 evaluable pts on 80 mg QW included 1 complete response, 2 partial responses, 7 stable disease, and 16 with progressive disease. Complete and partial responses were durable: the complete and a partial responder remain on selinexor for 393 and 1093 days respectively, as of the cut-off date. Median duration of response was 10.8 months. 
The most common related adverse events (all grades) in pts on ~85 mg BIW/60 mg BIW/80 mg QW were nausea (42%/64%/63%), leukopenia (38%/7%/43%), fatigue (71%/71%/47%), neutropenia (29%/14%/33%), decreased appetite (46%/71%/27%), and thrombocytopenia (67%/29%/23%). Selinexor demonstrated efficacy, with durable responses and disease stabilization in rGBM. Based on the favorable efficacy and safety profile, selinexor at a dose of 80 mg QW is recommended for further development in rGBM. |
<gh_stars>1-10
// SPDX-FileCopyrightText: 2020 <NAME>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// SPDX-License-Identifier: Apache-2.0
/*
 * Bare prototypes for a peripheral-driver API. Parameter names are omitted
 * in the declarations; the comments below note the apparent meaning of each
 * argument, inferred only from the function names and types — TODO confirm
 * against the implementations.
 */
/* GPIO */
/* Set pin direction(s); argument is presumably a direction bit-mask — confirm. */
void gpio_set_dir(unsigned int );
/* Write output level(s); argument is presumably a pin/value bit-mask — confirm. */
void gpio_write(unsigned int );
/* Read the current GPIO input state. */
unsigned int gpio_read();
/* Configure pull resistors; encoding of the byte argument is not visible here. */
void gpio_pull (unsigned char );
/* Configure GPIO interrupt mask — presumably; confirm. */
void gpio_im(unsigned int );
/* UART */
/* Initialize a UART; arguments are presumably instance and baud rate — confirm. */
int uart_init(unsigned int , unsigned int );
/* Transmit a buffer of the given length on the selected UART. */
int uart_puts(unsigned int , unsigned char *, unsigned int );
/* Receive up to the given number of bytes from the selected UART. */
int uart_gets(unsigned int , unsigned char *, unsigned int );
/* SPI */
/* Initialize an SPI instance; the three byte arguments likely select mode/clock options — confirm. */
int spi_init(unsigned int , unsigned char , unsigned char , unsigned char );
/* Read the status register of the selected SPI instance. */
unsigned int spi_status(unsigned int );
/* Read one byte from the selected SPI instance. */
unsigned char spi_read(unsigned int );
/* Write one byte to the selected SPI instance. */
int spi_write(unsigned int , unsigned char );
/* Begin an SPI transaction (presumably asserts chip select — confirm). */
int spi_start(unsigned int );
/* End an SPI transaction (presumably releases chip select — confirm). */
int spi_end(unsigned int );
/* i2c */
/* Initialize an I2C instance; second argument is presumably the bus speed — confirm. */
int i2c_init(unsigned int , unsigned int );
/* Send a byte pair (likely register + value) to the selected I2C target — confirm. */
int i2c_send(unsigned int , unsigned char , unsigned char );
/* PWM */
/* Initialize a PWM channel; the three trailing arguments likely set period/duty — confirm. */
int pwm_init(unsigned int, unsigned int, unsigned int, unsigned int);
/* Start PWM output on the given channel. */
int pwm_enable(unsigned int);
/* Stop PWM output on the given channel. */
int pwm_disable(unsigned int);
|
/**
* A Paged Data Source Factory provides a way to create and observe the last created data source.
*
* @author Osaigbovo Odiase
*/
@Singleton
public class MovieDataSourceFactory extends DataSource.Factory<Integer, Movie> {
public final MutableLiveData<MovieDataSource> movieDataSourceLiveData = new MutableLiveData<>();
private final RequestInterface requestInterface;
private final String sortType;
@Inject
public MovieDataSourceFactory(RequestInterface requestInterface, String sortType) {
this.requestInterface = requestInterface;
this.sortType = sortType;
}
/*
* After calling mDataSource.invalidate() method, mDataSource will be invalidated and the
* new DataSource instance will be created via DataSourceFactory.create() method,
* so its important to provide new DataSource() instance every time inside
* DataSourceFactory.create() method, do not provide same DataSource instance every time.
* mDataSource.invalidate() is not working, because after invalidation,
* DataSourceFactory provides the same, already invalidated DataSource instance.
* */
@Override
public DataSource<Integer, Movie> create() {
MovieDataSource movieDataSource = new MovieDataSource(requestInterface, sortType);
movieDataSourceLiveData.postValue(movieDataSource);
return movieDataSource;
}
} |
def check_duplicates(self, lst: List[HammerToolStep]) -> Tuple[bool, Set[str]]:
    """Verify that no two steps in ``lst`` share a name.

    :param lst: Steps to inspect, in order.
    :return: ``(True, <set of all step names>)`` when every name is unique;
        ``(False, set())`` as soon as a repeated name is found (an error is
        logged for the offending step).
    """
    names_so_far: Set[str] = set()
    for current in lst:
        if current.name in names_so_far:
            # Abort on the first duplicate; an empty set signals the failure.
            self.logger.error("Duplicate step '{step}' encountered".format(step=current.name))
            return False, set()
        names_so_far.add(current.name)
    return True, names_so_far
<reponame>TeliaSoneraNorge/styleguide<gh_stars>10-100
//
// WARNING
//
// Do not make manual changes to this file.
// This file was generated by scripts/generate-icons.js.
//
// Generated from: arrow-large-down.svg
//
import React from 'react';
import cs from 'classnames';
// Props accepted by this generated icon component.
interface Props {
// Inline styles applied to the root <svg> element.
style?: React.CSSProperties;
// Extra class names merged with the icon's own "Icon" classes.
className?: string;
// Accessible title; when omitted the icon is marked aria-hidden.
title?: string;
}
/**
 * Renders the "arrow-large-down" icon (generated from arrow-large-down.svg).
 *
 * When `title` is provided it becomes the SVG's accessible name via a
 * <title> child; otherwise the icon is hidden from assistive technology
 * with aria-hidden.
 */
export function ArrowLargeDownIcon(props: Props) {
return (
<svg
className={cs('Icon', 'Icon--arrow-large-down', props.className)}
style={props.style}
aria-hidden={props.title ? undefined : true}
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 64 64"
>
{props.title && <title>{props.title}</title>}
<path
fillRule="evenodd"
d="M12.662 32.513a2 2 0 012.704.026l.12.123L30 48.788V10a2 2 0 011.85-1.995L32 8a2 2 0 011.995 1.85L34 10v38.789l14.513-16.127a2 2 0 012.692-.259l.133.11a2 2 0 01.259 2.692l-.11.133-18.008 20.008-.141.14-.075.065-.085.065-.129.087-.206.111-.166.068-.19.058-.192.038-.131.015-.165.007-.162-.007-.166-.02-.105-.02-.167-.045-.15-.054-.154-.07-.144-.08-.164-.114-.133-.111-.168-.172-17.973-19.97a2 2 0 01.15-2.824z"
/>
</svg>
);
}
|
def std_z(self, x):
    """Return the scalar standard deviation of the latent codes for ``x``.

    ``x`` is mapped through ``self.Txz`` and the result is treated as
    zero-mean: the per-dimension second moments are averaged and the
    square root of that average is returned.
    """
    latent = self.Txz.predict(x)
    second_moments = np.mean(latent ** 2, axis=0)
    return np.sqrt(np.mean(second_moments))
/*
* Copyright 2011, 2012 Odysseus Software GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.odysseus.staxon.json.jaxb;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import javax.xml.bind.Marshaller;
import javax.xml.bind.Unmarshaller;
import javax.xml.bind.annotation.XmlType;
import javax.xml.namespace.QName;
import javax.xml.stream.XMLStreamReader;
import javax.xml.stream.XMLStreamWriter;
import junit.framework.Assert;
import org.junit.Test;
import de.odysseus.staxon.json.JsonXMLInputFactory;
import de.odysseus.staxon.json.JsonXMLOutputFactory;
import de.odysseus.staxon.json.jaxb.sample.SampleRootElement;
import de.odysseus.staxon.json.jaxb.sample.SampleType;
import de.odysseus.staxon.json.jaxb.sample.SampleTypeWithNamespace;
/**
 * Unit tests for {@code JsonXMLBinder}: bindability checks, input/output
 * factory configuration derived from {@link JsonXML} annotations, and JSON
 * round-tripping (marshal/unmarshal of single objects and arrays) for the
 * sample JAXB types.
 */
public class JsonXMLBinderTest {
/** Carrier for a default {@link JsonXML} annotation (all attributes defaulted). */
@JsonXML
static class JsonXMLDefault {}
/** Carrier for a fully customized {@link JsonXML} annotation. */
@JsonXML(autoArray = true, namespaceDeclarations = false, namespaceSeparator = '_', prettyPrint = true, virtualRoot = true)
static class JsonXMLCustom {}
/** Virtual-root configuration with "/elements" declared as a multiple-occurrence path. */
@JsonXML(virtualRoot = true, multiplePaths = "/elements")
static class JsonXMLVirtualSampleRootElement {}
/** Maps prefix "p" to "urn:staxon:jaxb:test" without writing namespace declarations. */
@JsonXML(namespaceDeclarations = false, namespaceMappings = {"{urn:staxon:jaxb:test}p"})
static class JsonXMLNamespaceMappings {}
/** Minimal XML type (no fields). */
@XmlType
static class EmptyType {}
// --- bindability and factory configuration ---
@Test
public void testIsBindable() {
Assert.assertTrue(new JsonXMLBinder().isBindable(SampleRootElement.class));
Assert.assertTrue(new JsonXMLBinder().isBindable(SampleType.class));
// The test class itself is not a JAXB type, hence not bindable.
Assert.assertFalse(new JsonXMLBinder().isBindable(getClass()));
}
@Test
public void testCreateInputFactory() throws JAXBException {
// Default annotation: ':' separator, no virtual root.
JsonXMLInputFactory factory = new JsonXMLBinder().createInputFactory(SampleRootElement.class, JsonXMLDefault.class.getAnnotation(JsonXML.class));
Assert.assertEquals(Boolean.TRUE, factory.getProperty(JsonXMLInputFactory.PROP_MULTIPLE_PI));
Assert.assertEquals(Character.valueOf(':'), factory.getProperty(JsonXMLInputFactory.PROP_NAMESPACE_SEPARATOR));
Assert.assertNull(factory.getProperty(JsonXMLInputFactory.PROP_VIRTUAL_ROOT));
// Custom annotation: '_' separator and a virtual root derived from the type.
factory = new JsonXMLBinder().createInputFactory(SampleRootElement.class, JsonXMLCustom.class.getAnnotation(JsonXML.class));
Assert.assertEquals(Boolean.TRUE, factory.getProperty(JsonXMLInputFactory.PROP_MULTIPLE_PI));
Assert.assertEquals(Character.valueOf('_'), factory.getProperty(JsonXMLInputFactory.PROP_NAMESPACE_SEPARATOR));
Assert.assertEquals(new QName("sampleRootElement"), factory.getProperty(JsonXMLInputFactory.PROP_VIRTUAL_ROOT));
}
@Test
public void testCreateOutputFactory() throws JAXBException {
// Default annotation: declarations on, no pretty print, no auto array.
JsonXMLOutputFactory factory = new JsonXMLBinder().createOutputFactory(SampleRootElement.class, JsonXMLDefault.class.getAnnotation(JsonXML.class));
Assert.assertEquals(Boolean.TRUE, factory.getProperty(JsonXMLOutputFactory.PROP_MULTIPLE_PI));
Assert.assertEquals(Character.valueOf(':'), factory.getProperty(JsonXMLOutputFactory.PROP_NAMESPACE_SEPARATOR))
;
Assert.assertNull(factory.getProperty(JsonXMLOutputFactory.PROP_VIRTUAL_ROOT));
Assert.assertEquals(Boolean.TRUE, factory.getProperty(JsonXMLOutputFactory.PROP_NAMESPACE_DECLARATIONS));
Assert.assertEquals(Boolean.FALSE, factory.getProperty(JsonXMLOutputFactory.PROP_PRETTY_PRINT));
Assert.assertEquals(Boolean.FALSE, factory.getProperty(JsonXMLOutputFactory.PROP_AUTO_ARRAY));
// Custom annotation flips every one of those settings.
factory = new JsonXMLBinder().createOutputFactory(SampleRootElement.class, JsonXMLCustom.class.getAnnotation(JsonXML.class));
Assert.assertEquals(Boolean.TRUE, factory.getProperty(JsonXMLOutputFactory.PROP_MULTIPLE_PI));
Assert.assertEquals(Character.valueOf('_'), factory.getProperty(JsonXMLOutputFactory.PROP_NAMESPACE_SEPARATOR));
Assert.assertEquals(new QName("sampleRootElement"), factory.getProperty(JsonXMLOutputFactory.PROP_VIRTUAL_ROOT));
Assert.assertEquals(Boolean.FALSE, factory.getProperty(JsonXMLOutputFactory.PROP_NAMESPACE_DECLARATIONS));
Assert.assertEquals(Boolean.TRUE, factory.getProperty(JsonXMLOutputFactory.PROP_PRETTY_PRINT));
Assert.assertEquals(Boolean.TRUE, factory.getProperty(JsonXMLOutputFactory.PROP_AUTO_ARRAY));
}
// --- marshalling via Marshaller + XMLStreamWriter ---
@Test
public void testMarshallSampleRootElement() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
StringWriter result = new StringWriter();
Class<?> type = SampleRootElement.class;
SampleRootElement sampleRootElement = new SampleRootElement();
sampleRootElement.attribute = "hello";
sampleRootElement.elements = Arrays.asList("world");
XMLStreamWriter writer = new JsonXMLBinder().createXMLStreamWriter(type, config, result);
Marshaller marshaller = JAXBContext.newInstance(type).createMarshaller();
new JsonXMLBinder().marshal(type, config, marshaller, writer, sampleRootElement);
writer.close();
// Without autoArray, a single-element list serializes as a plain value.
String json = "{\"sampleRootElement\":{\"@attribute\":\"hello\",\"elements\":\"world\"}}";
Assert.assertEquals(json, result.toString());
}
@Test
public void testMarshallSampleRootElementWithVirtualRoot() throws Exception {
JsonXML config = JsonXMLVirtualSampleRootElement.class.getAnnotation(JsonXML.class);
StringWriter result = new StringWriter();
Class<?> type = SampleRootElement.class;
SampleRootElement sampleRootElement = new SampleRootElement();
sampleRootElement.attribute = "hello";
sampleRootElement.elements = Arrays.asList("world");
XMLStreamWriter writer = new JsonXMLBinder().createXMLStreamWriter(type, config, result);
Marshaller marshaller = JAXBContext.newInstance(type).createMarshaller();
new JsonXMLBinder().marshal(type, config, marshaller, writer, sampleRootElement);
writer.close();
// Virtual root drops the outer element; "/elements" is forced into an array.
String json = "{\"@attribute\":\"hello\",\"elements\":[\"world\"]}";
Assert.assertEquals(json, result.toString());
}
@Test
public void testMarshallSampleType() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
StringWriter result = new StringWriter();
Class<?> type = SampleType.class;
SampleType sampleType = new SampleType();
sampleType.element = "hi!";
XMLStreamWriter writer = new JsonXMLBinder().createXMLStreamWriter(type, config, result);
Marshaller marshaller = JAXBContext.newInstance(type).createMarshaller();
new JsonXMLBinder().marshal(type, config, marshaller, writer, sampleType);
writer.close();
String json = "{\"sampleType\":{\"element\":\"hi!\"}}";
Assert.assertEquals(json, result.toString());
}
@Test
public void testMarshallSampleTypeWithNamespace() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
StringWriter result = new StringWriter();
Class<?> type = SampleTypeWithNamespace.class;
SampleTypeWithNamespace sampleTypeWithNamespace = new SampleTypeWithNamespace();
XMLStreamWriter writer = new JsonXMLBinder().createXMLStreamWriter(type, config, result);
Marshaller marshaller = JAXBContext.newInstance(type).createMarshaller();
new JsonXMLBinder().marshal(type, config, marshaller, writer, sampleTypeWithNamespace);
writer.close();
// The JAXB-generated prefix is not fixed, so capture it from the output first.
Matcher prefixMatcher = Pattern.compile("@xmlns:([a-z1-9]+)").matcher(result.toString());
Assert.assertTrue(prefixMatcher.find());
String prefix = prefixMatcher.group(1);
String json = String.format("{\"%s:sampleTypeWithNamespace\":{\"@xmlns:%s\":\"urn:staxon:jaxb:test\"}}", prefix, prefix);
Assert.assertEquals(json, result.toString());
}
// --- unmarshalling via Unmarshaller + XMLStreamReader ---
@Test
public void testUnmarshallSampleRootElement() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
String json = "{\"sampleRootElement\":{\"@attribute\":\"hello\",\"elements\":[\"world\"]}}";
Class<SampleRootElement> type = SampleRootElement.class;
XMLStreamReader reader = new JsonXMLBinder().createXMLStreamReader(type, config, new StringReader(json));
Unmarshaller unmarshaller = JAXBContext.newInstance(type).createUnmarshaller();
SampleRootElement sampleRootElement = new JsonXMLBinder().unmarshal(type, config, unmarshaller, reader);
Assert.assertEquals("hello", sampleRootElement.attribute);
Assert.assertEquals("world", sampleRootElement.elements.get(0));
}
@Test
public void testUnmarshallSampleRootElementWithVirtualRoot() throws Exception {
JsonXML config = JsonXMLVirtualSampleRootElement.class.getAnnotation(JsonXML.class);
String json = "{\"@attribute\":\"hello\",\"elements\":[\"world\"]}";
Class<SampleRootElement> type = SampleRootElement.class;
XMLStreamReader reader = new JsonXMLBinder().createXMLStreamReader(type, config, new StringReader(json));
Unmarshaller unmarshaller = JAXBContext.newInstance(type).createUnmarshaller();
SampleRootElement sampleRootElement = new JsonXMLBinder().unmarshal(type, config, unmarshaller, reader);
Assert.assertEquals("hello", sampleRootElement.attribute);
Assert.assertEquals("world", sampleRootElement.elements.get(0));
}
@Test
public void testUnmarshallSampleType() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
String json = "{\"sampleType\":{\"element\":\"hi!\"}}";
Class<SampleType> type = SampleType.class;
XMLStreamReader reader = new JsonXMLBinder().createXMLStreamReader(type, config, new StringReader(json));
Unmarshaller unmarshaller = JAXBContext.newInstance(type).createUnmarshaller();
SampleType sampleType = new JsonXMLBinder().unmarshal(type, config, unmarshaller, reader);
Assert.assertEquals("hi!", sampleType.element);
}
@Test
public void testUnmarshallSampleTypeWithNamespace() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
// NOTE(review): this URI ("urn:staxon-jaxrs:test") differs from the
// "urn:staxon:jaxb:test" used elsewhere in this class — presumably
// intentional since only non-null is asserted; confirm.
String json = "{\"sampleTypeWithNamespace\":{\"@xmlns\":\"urn:staxon-jaxrs:test\"}}";
Class<SampleTypeWithNamespace> type = SampleTypeWithNamespace.class;
XMLStreamReader reader = new JsonXMLBinder().createXMLStreamReader(type, config, new StringReader(json));
Unmarshaller unmarshaller = JAXBContext.newInstance(SampleTypeWithNamespace.class).createUnmarshaller();
Assert.assertNotNull(new JsonXMLBinder().unmarshal(SampleTypeWithNamespace.class, config, unmarshaller, reader));
}
@Test
public void testUnmarshallSampleTypeWithNamespaceMapping() throws Exception {
JsonXML config = JsonXMLNamespaceMappings.class.getAnnotation(JsonXML.class);
Class<?> type = SampleTypeWithNamespace.class;
String json = "{\"p:sampleTypeWithNamespace\":null}";
XMLStreamReader reader = new JsonXMLBinder().createXMLStreamReader(type, config, new StringReader(json));
// The "p" prefix resolves through the annotation's namespaceMappings.
Assert.assertEquals("urn:staxon:jaxb:test", reader.getNamespaceContext().getNamespaceURI("p"));
Unmarshaller unmarshaller = JAXBContext.newInstance(SampleTypeWithNamespace.class).createUnmarshaller();
Assert.assertNotNull(new JsonXMLBinder().unmarshal(SampleTypeWithNamespace.class, config, unmarshaller, reader));
}
// --- writeObject/readObject convenience API ---
@Test
public void testWriteObjectSampleRootElement() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
SampleRootElement sampleRootElement = new SampleRootElement();
sampleRootElement.attribute = "hello";
sampleRootElement.elements = Arrays.asList("world");
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(SampleRootElement.class);
new JsonXMLBinder().writeObject(SampleRootElement.class, config, context, writer, sampleRootElement);
String json = "{\"sampleRootElement\":{\"@attribute\":\"hello\",\"elements\":\"world\"}}";
Assert.assertEquals(json, writer.toString());
}
@Test
public void testWriteObjectSampleType() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
SampleType sampleType = new SampleType();
sampleType.element = "hi!";
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(SampleType.class);
new JsonXMLBinder().writeObject(SampleType.class, config, context, writer, sampleType);
String json = "{\"sampleType\":{\"element\":\"hi!\"}}";
Assert.assertEquals(json, writer.toString());
}
@Test
public void testWriteObjectNull() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(SampleType.class);
new JsonXMLBinder().writeObject(SampleType.class, config, context, writer, null);
Assert.assertEquals("null", writer.toString());
}
@Test
public void testWriteObjectNullWithVirtualRoot() throws Exception {
JsonXML config = JsonXMLVirtualSampleRootElement.class.getAnnotation(JsonXML.class);
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(SampleType.class);
new JsonXMLBinder().writeObject(SampleType.class, config, context, writer, null);
Assert.assertEquals("null", writer.toString());
}
@Test
public void testReadObjectSampleRootElement() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
String json = "{\"sampleRootElement\":{\"@attribute\":\"hello\",\"elements\":[\"world\"]}}";
JAXBContext context = JAXBContext.newInstance(SampleRootElement.class);
SampleRootElement sampleRootElement =
new JsonXMLBinder().readObject(SampleRootElement.class, config, context, new StringReader(json));
Assert.assertEquals("hello", sampleRootElement.attribute);
Assert.assertEquals("world", sampleRootElement.elements.get(0));
}
@Test
public void testReadObjectSampleType() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
String json = "{\"sampleType\":{\"element\":\"hi!\"}}";
JAXBContext context = JAXBContext.newInstance(SampleType.class);
SampleType sampleType =
new JsonXMLBinder().readObject(SampleType.class, config, context, new StringReader(json));
Assert.assertEquals("hi!", sampleType.element);
}
@Test
public void testReadObjectNull() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
JAXBContext context = JAXBContext.newInstance(SampleRootElement.class);
Assert.assertNull(new JsonXMLBinder().readObject(SampleRootElement.class, config, context, new StringReader("null")));
}
@Test
public void testReadObjectNullWithVirtualRoot() throws Exception {
JsonXML config = JsonXMLVirtualSampleRootElement.class.getAnnotation(JsonXML.class);
JAXBContext context = JAXBContext.newInstance(SampleRootElement.class);
// With a virtual root, "null" still yields an (empty) instance rather than null.
Assert.assertNotNull(new JsonXMLBinder().readObject(SampleRootElement.class, config, context, new StringReader("null")));
}
// --- writeArray/readArray ---
@Test
public void testWriteArraySampleRootElement() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
List<SampleRootElement> list = new ArrayList<SampleRootElement>();
list.add(new SampleRootElement());
list.get(0).attribute = "hello";
list.add(new SampleRootElement());
list.get(1).attribute = "world";
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(SampleRootElement.class);
new JsonXMLBinder().writeArray(SampleRootElement.class, config, context, writer, list);
String json = "[{\"sampleRootElement\":{\"@attribute\":\"hello\"}},{\"sampleRootElement\":{\"@attribute\":\"world\"}}]";
Assert.assertEquals(json, writer.toString());
}
@Test
public void testWriteArraySampleRootElement_Document() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
List<SampleRootElement> list = new ArrayList<SampleRootElement>();
list.add(new SampleRootElement());
list.get(0).attribute = "hello";
list.add(new SampleRootElement());
list.get(1).attribute = "world";
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(SampleRootElement.class);
// Binder constructed with false: produce a single document with an array field.
new JsonXMLBinder(false).writeArray(SampleRootElement.class, config, context, writer, list);
String json = "{\"sampleRootElement\":[{\"@attribute\":\"hello\"},{\"@attribute\":\"world\"}]}";
Assert.assertEquals(json, writer.toString());
}
@Test
public void testWriteArraySampleType() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
List<SampleType> list = new ArrayList<SampleType>();
list.add(new SampleType());
list.get(0).element = "hello";
list.add(new SampleType());
list.get(1).element = "world";
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(SampleType.class);
new JsonXMLBinder().writeArray(SampleType.class, config, context, writer, list);
String json = "[{\"sampleType\":{\"element\":\"hello\"}},{\"sampleType\":{\"element\":\"world\"}}]";
Assert.assertEquals(json, writer.toString());
}
@Test
public void testWriteArraySampleRootElementWithNull1() throws Exception {
JsonXML config = JsonXMLVirtualSampleRootElement.class.getAnnotation(JsonXML.class);
List<SampleRootElement> list = new ArrayList<SampleRootElement>();
list.add(new SampleRootElement());
list.get(0).attribute = "hello";
list.add(null);
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(SampleRootElement.class);
new JsonXMLBinder().writeArray(SampleRootElement.class, config, context, writer, list);
String json = "[{\"@attribute\":\"hello\"},null]";
Assert.assertEquals(json, writer.toString());
}
@Test
public void testWriteArraySampleRootElementWithNull2() throws Exception {
JsonXML config = JsonXMLVirtualSampleRootElement.class.getAnnotation(JsonXML.class);
List<SampleRootElement> list = new ArrayList<SampleRootElement>();
list.add(null);
list.add(new SampleRootElement());
list.get(1).attribute = "hello";
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(SampleRootElement.class);
new JsonXMLBinder().writeArray(SampleRootElement.class, config, context, writer, list);
String json = "[null,{\"@attribute\":\"hello\"}]";
Assert.assertEquals(json, writer.toString());
}
@Test
public void testWriteArraySampleRootElementWithNull3() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
List<SampleRootElement> list = new ArrayList<SampleRootElement>();
list.add(null);
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(SampleRootElement.class);
new JsonXMLBinder().writeArray(SampleRootElement.class, config, context, writer, list);
String json = "[null]";
Assert.assertEquals(json, writer.toString());
}
@Test
public void testWriteArrayEmpty1() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(SampleRootElement.class);
new JsonXMLBinder().writeArray(SampleRootElement.class, config, context, writer, new ArrayList<SampleRootElement>());
Assert.assertEquals("[]", writer.toString());
}
@Test
public void testWriteArrayEmpty2() throws Exception {
JsonXML config = JsonXMLVirtualSampleRootElement.class.getAnnotation(JsonXML.class);
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(SampleRootElement.class);
new JsonXMLBinder().writeArray(SampleRootElement.class, config, context, writer, new ArrayList<SampleRootElement>());
Assert.assertEquals("[]", writer.toString());
}
@Test
public void testWriteArrayNull() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(SampleType.class);
new JsonXMLBinder().writeArray(SampleType.class, config, context, writer, null);
Assert.assertEquals("null", writer.toString());
}
@Test
public void testWriteArrayNullWithVirtualRoot() throws Exception {
JsonXML config = JsonXMLVirtualSampleRootElement.class.getAnnotation(JsonXML.class);
StringWriter writer = new StringWriter();
JAXBContext context = JAXBContext.newInstance(SampleType.class);
new JsonXMLBinder().writeArray(SampleType.class, config, context, writer, null);
Assert.assertEquals("null", writer.toString());
}
@Test
public void testReadArraySampleRootElement() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
String json = "{\"sampleRootElement\":[{\"@attribute\":\"hello\"},{\"@attribute\":\"world\"}]}";
JAXBContext context = JAXBContext.newInstance(SampleRootElement.class);
List<SampleRootElement> list =
new JsonXMLBinder().readArray(SampleRootElement.class, config, context, new StringReader(json));
Assert.assertEquals(2, list.size());
Assert.assertEquals("hello", list.get(0).attribute);
Assert.assertEquals("world", list.get(1).attribute);
}
@Test
public void testReadArraySampleRootElement_DocumentArray() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
String json = "[{\"sampleRootElement\":{\"@attribute\":\"hello\"}},{\"sampleRootElement\":{\"@attribute\":\"world\"}}]";
JAXBContext context = JAXBContext.newInstance(SampleRootElement.class);
List<SampleRootElement> list =
new JsonXMLBinder().readArray(SampleRootElement.class, config, context, new StringReader(json));
Assert.assertEquals(2, list.size());
Assert.assertEquals("hello", list.get(0).attribute);
Assert.assertEquals("world", list.get(1).attribute);
}
@Test
public void testReadArraySampleTypeWithNull1() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
String json = "{\"sampleType\":[{\"element\":\"hi!\"},null]}";
JAXBContext context = JAXBContext.newInstance(SampleType.class);
List<SampleType> list = new JsonXMLBinder().readArray(SampleType.class, config, context, new StringReader(json));
Assert.assertEquals(2, list.size());
Assert.assertEquals("hi!", list.get(0).element);
// Inside a single document, null becomes an empty instance (element == null).
Assert.assertNull(list.get(1).element);
}
@Test
public void testReadArraySampleTypeWithNull2() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
String json = "{\"sampleType\":[null,{\"element\":\"hi!\"}]}";
JAXBContext context = JAXBContext.newInstance(SampleType.class);
List<SampleType> list = new JsonXMLBinder().readArray(SampleType.class, config, context, new StringReader(json));
Assert.assertEquals(2, list.size());
Assert.assertNull(list.get(0).element);
Assert.assertEquals("hi!", list.get(1).element);
}
@Test
public void testReadArraySampleTypeWithNull_DocumentArray1() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
String json = "[{\"sampleType\":{\"element\":\"hi!\"}},null]";
JAXBContext context = JAXBContext.newInstance(SampleType.class);
List<SampleType> list = new JsonXMLBinder().readArray(SampleType.class, config, context, new StringReader(json));
Assert.assertEquals(2, list.size());
Assert.assertEquals("hi!", list.get(0).element);
// In a document array, a null document yields a null list entry.
Assert.assertNull(list.get(1));
}
@Test
public void testReadArraySampleTypeWithNull_DocumentArray2() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
String json = "[null,{\"sampleType\":{\"element\":\"hi!\"}}]";
JAXBContext context = JAXBContext.newInstance(SampleType.class);
List<SampleType> list = new JsonXMLBinder().readArray(SampleType.class, config, context, new StringReader(json));
Assert.assertEquals(2, list.size());
Assert.assertNull(list.get(0));
Assert.assertEquals("hi!", list.get(1).element);
}
@Test
public void testReadArrayNull() throws Exception {
JsonXML config = JsonXMLDefault.class.getAnnotation(JsonXML.class);
JAXBContext context = JAXBContext.newInstance(SampleType.class);
Assert.assertNull(new JsonXMLBinder().readArray(SampleType.class, config, context, new StringReader("null")));
}
@Test
public void testReadArrayNullWithVirtualRoot() throws Exception {
JsonXML config = JsonXMLVirtualSampleRootElement.class.getAnnotation(JsonXML.class);
JAXBContext context = JAXBContext.newInstance(SampleType.class);
Assert.assertNotNull(new JsonXMLBinder().readArray(SampleType.class, config, context, new StringReader("null")));
}
}
|
Rationale and empirical construction of MEIS ("My Experience of Illness" Scale)--a tool for self-rating of patients' attitudes toward their experience of schizophrenic illness--were presented. The MEIS questionnaire consists of 6 scales. Each of them is an incomplete sentence which should be matched with one of five answers--the most appropriate from the patient's perspective--presented as a so-called "visual analogy scale". Factor analysis of the MEIS supplies a three-component solution with two scale-sentences in each factor. The factors may be interpreted as: identification of the experience of illness with self, its evaluation, and the patient's reflectiveness toward the experience of illness. Some evidence allows one to interpret the summarized score of the MEIS as an indicator of the patient's whole attitude toward his or her experience of illness in the sense of its "integrating" with or "isolating" from other life experiences. The results of self-rating of patients' attitudes are moderately correlated with ratings of them by an observing clinician (EIS scale). Empirical findings supporting reliability, validity, and feasibility of the MEIS were presented. It may be useful as a subjective measure of patients' attitudes toward the experience of their illness. |
import * as fs from "fs";
import * as util from "util";
import * as url from "url";
import { PlaylistDal } from "./dal/dal-playlists";
import { Logger } from "./logger";
import { IConfiguration } from "./types/config";
import { ListItem } from "./types/list-item";
import * as express from "express";
import * as cors from "cors";
import { Playlist } from "playlist";
import bodyParser = require("body-parser");
import { SongDal } from "./dal/dal-songs";
// convert fs.readFile into Promise version of same
const readFile = util.promisify(fs.readFile);
// getting the configuration from file for now
async function getConfig(): Promise<IConfiguration> {
  // Read the whole file; JSON.parse will surface any syntax errors.
  const rawJson: any = await readFile("./config.json", "utf8");
  return JSON.parse(rawJson) as IConfiguration;
}
/**
 * Service entry point: loads the configuration, connects the DALs to
 * MongoDB (host/port/db overridable via the MONGO_SERVICE_* environment
 * variables) and exposes the playlist/song REST API on port 3002.
 */
async function main() {
  let playlists: PlaylistDal = new PlaylistDal();
  let conf = await getConfig();
  let mongoHost = process.env["MONGO_SERVICE_HOST"] || "localhost";
  let mongoPort = process.env["MONGO_SERVICE_PORT_TCP"] || "27017";
  let mongoDbName = process.env["MONGO_SERVICE_DB_NAME"] || "demo1";
  let logger = new Logger(conf.logLevel);
  let mongoConnStr =
    "mongodb://" + mongoHost + ":" + mongoPort + "/" + mongoDbName;
  logger.info("connecting to db:", mongoConnStr);
  let songs: SongDal = new SongDal(logger, mongoConnStr);
  // NOTE(review): delete()/populate() look asynchronous; if they return
  // promises they should be awaited so requests never see a half-seeded DB.
  songs.delete();
  songs.populate("songs.json");
  logger.info("loglevel:", logger.logLevel);
  logger.debug("running with configuration: ", JSON.stringify(conf));
  // no authentication yet, set our user as #2
  let currentUserId = "2";
  // now using a cors header (allow origin)
  let app: express.Express = express();
  app.use(bodyParser.json());
  app.use(cors());
  // GET /v1/playlists?id=<id>                         -> one playlist
  // GET /v1/playlists?userId=<uid>[&onlyOwned=true]   -> a user's playlists
  app.get(
    "/v1/playlists",
    (req: express.Request, res: express.Response): any => {
      let plists: Playlist[] = [];
      if (req.query.id) {
        let id: string = req.query.id;
        let plist = playlists.getPlaylistById(id);
        plists = [plist];
      } else if (req.query.userId) {
        let userId = req.query.userId;
        // onlyOwned is false for any value other than the string "true"
        let owned: boolean = req.query.onlyOwned === "true";
        plists = playlists.getPlaylistsByUser(userId, owned);
      }
      if (plists) {
        res.send(plists);
      }
    }
  );
  // POST /v1/playlists/:id/?songId=<sid> -> add a song to a playlist
  app.post(
    "/v1/playlists/:id/",
    (req: express.Request, res: express.Response): any => {
      let urlParts = url.parse(req.url, true);
      let listId: string = req.params.id as string;
      let songId: string = urlParts.query.songId as string;
      let err = playlists.addItemToPlaylist(currentUserId, listId, songId);
      if (err) {
        // return here so we don't also write "success" after the error
        return res.status(401).send(err.message);
      }
      res.end("success");
    }
  );
  // DELETE /v1/playlists/:id/?songId=<sid> -> remove a song from a playlist
  app.delete(
    "/v1/playlists/:id/",
    (req: express.Request, res: express.Response): any => {
      let urlParts = url.parse(req.url, true);
      let listId: string = req.params.id as string;
      let songId: string = urlParts.query.songId as string;
      let err = playlists.removeItemFromPlaylist(currentUserId, listId, songId);
      if (err) {
        return res.status(401).send(err.message);
      }
      res.end("success");
    }
  );
  // add a playlist
  app.post(
    "/v1/playlists",
    (req: express.Request, res: express.Response): any => {
      let listItem: Playlist = req.body;
      if (listItem) {
        let plist = playlists.addNewPlaylist(listItem.name, listItem.creatorId);
        res.send(plist);
      } else {
        logger.error("Playlists API post: no list item in body!");
        res.status(404).send("No Item to add");
      }
    }
  );
  // delete a playlist
  app.delete(
    "/v1/playlists",
    (req: express.Request, res: express.Response): any => {
      let urlParts = url.parse(req.url, true);
      let listId: string = urlParts.query.id as string;
      if (listId) {
        let err = playlists.delPlaylist(currentUserId, listId);
        if (err) {
          return res.status(401).send(err.message);
        }
        res.end("success");
      } else {
        logger.error("Playlists API delete: no id in query!");
        res.status(404).send("No Item found to delete");
      }
    }
  );
  // GET /v1/songs[?id=<id> | ?q=<text>] -> one song, a search, or all songs
  app.get("/v1/songs", async (req: express.Request, res: express.Response) => {
    let id: string = req.query.id;
    if (id) {
      let song = await songs.getSongById(id);
      res.send(song);
    } else if (req.query.q) {
      let query = req.query.q;
      res.send(await songs.getSongSearch(query));
    } else {
      res.send(await songs.getAllSongs());
    }
  });
  app.listen(3002, () => {
    logger.info("Service listening on port 3002!");
  });
}
// Surface startup failures (e.g. unreadable config.json) instead of an
// unhandled promise rejection.
main().catch(err => {
  console.error("fatal startup error:", err);
  process.exit(1);
});
|
"If we want to repeat the same mistakes, this would be the way to do it — understating the cost of tax breaks for wealthy folks," Carmichael said.
The state has a projected $900 million surplus in the current fiscal-year budget ending June 30. Big-ticket items related to school funding, pension-fund payments and tax reform dominate discussion at the Capitol, along with speculation about whether Kelly would veto a tax-windfall bill.
def skip_column(self, column):
    """Return ``True`` when ``column`` should be skipped by the caller.

    The rules mirror the options on ``self.meta`` and are evaluated in
    order; the first matching rule wins.  Anything that is not a plain
    ``sa.Column`` is always skipped as well.
    """
    opts = self.meta
    # Foreign keys are skipped unless explicitly included.
    if not opts.include_foreign_keys and column.foreign_keys:
        return True
    # Primary keys are skipped unless explicitly included.
    if not opts.include_primary_keys and column.primary_key:
        return True
    # Datetimes with a default (e.g. created_at) are typically
    # server-managed, so they are skipped unless requested.
    has_defaulted_datetime = (
        not opts.include_datetimes_with_default
        and isinstance(column.type, sa.types.DateTime)
        and column.default
    )
    if has_defaulted_datetime:
        return True
    # Full-text search vectors are internal and always skipped.
    if isinstance(column.type, types.TSVectorType):
        return True
    # Optionally restrict processing to indexed columns only.
    if opts.only_indexed_fields and not self.has_index(column):
        return True
    return not isinstance(column, sa.Column)
<gh_stars>100-1000
// File include/eglplus/enums/texture_target.hpp
//
// Automatically generated file, DO NOT modify manually.
// Edit the source 'source/enums/eglplus/texture_target.txt'
// or the 'source/enums/make_enum.py' script instead.
//
// Copyright 2010-2019 <NAME>.
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
#include <eglplus/enumerations.hpp>
namespace eglplus {

/// Enumeration TextureTarget
/**
 *  Strongly-typed wrapper around the EGLenum texture-target values; the
 *  individual enumerator values are pulled in from the generated
 *  texture_target.ipp file.
 *
 *  @ingroup eglplus_enumerations
 */
EGLPLUS_ENUM_CLASS_BEGIN(TextureTarget, EGLenum)
#include <eglplus/enums/texture_target.ipp>
EGLPLUS_ENUM_CLASS_END(TextureTarget)

// Optional human-readable enumerator names
// (compiled out when EGLPLUS_NO_ENUM_VALUE_NAMES is set).
#if !EGLPLUS_NO_ENUM_VALUE_NAMES
#include <eglplus/enums/texture_target_names.ipp>
#endif

// Optional enumerator range/traversal support
// (compiled out when EGLPLUS_NO_ENUM_VALUE_RANGES is set).
#if !EGLPLUS_NO_ENUM_VALUE_RANGES
#include <eglplus/enums/texture_target_range.ipp>
#endif

} // namespace eglplus
|
Validation of predicted anonymous proteins simply using Fisher's exact test Motivation Genome sequencing has become the primary (and often the sole) experimental method to characterize newly discovered organisms, in particular from the microbial world (bacteria, archaea, viruses). This generates an ever increasing number of predicted proteins the existence of which is unwarranted, in particular among those without a homolog in model organisms. As a last resort, the computation of the selection pressure from pairwise alignments of the corresponding Open Reading Frames (ORFs) can be used to validate their existence. However, this approach is error-prone, as it is not usually associated with a significance test. Results We introduce the use of the straightforward Fisher's exact test as a post-processing of the results provided by the popular CODEML sequence comparison software. The respective rates of nucleotide changes at the non-synonymous vs. synonymous positions (as determined by CODEML) are turned into entries of a 2×2 contingency table, the probability of which is computed under the null hypothesis that they should not behave differently if the ORFs do not encode actual proteins. Using the genome sequences of two recently isolated giant viruses, we show that strong negative selection pressures do not always provide a solid argument in favor of the existence of proteins. Contact Jean-Michel.Claverie@univ-amu.fr Introduction Since the first two bacterial genomes were sequenced 25 years ago (;), partial and whole genome sequencing have become the methods of choice in identifying and characterizing new microorganisms (bacteria, archaea, unicellular eukaryotes, viruses), revealing the stupendous extent of their diversity. 
By their simplicity of use and low cost, the most recent 3rd generation sequencing platforms () have made microbial genomics accessible to non-specialists (;), while a few large centers are fully taking advantage of their huge throughput to run biodiversity exploration projects of ever increasing dimensions (;;). The most recurring (and unexpected) lesson emerging from the analyses of these enormous datasets is that overall morphological and phylogenetic similarities, as well as similar metabolisms and lifestyles, could hide large differences in gene contents and encoded proteomes. Within microorganisms belonging to a given clade, such as eukaryotic classes, bacterial genera, or virus families, genomes are found to encode a subset of « core proteins (i.e. with homologues in all members) together with proteins unevenly distributed, some of which only present in a single species or strain. This dichotomy is best documented for bacteria and viruses for which core genes might only represent a small proportion of the pangenome, i.e. of all the genes occurring at least once among all clade members (, Claverie & Abergel, 2018. While there is little doubt that genes encoding proteins with homologs in multiple divergent members of a clade are real, the level of certainty is much lower when they only occur once, or within very close clade members where the corresponding Open Reading Frames (ORF) may occur by chance. Given the A+T richness of STOP codons (TAA, TAG, TGA), random ORFs are also statistically expected to occur at higher frequency in high G+C content genomes, increasing the risk of protein overprediction (). The uncertainty further increases when the predicted proteins are short (typically less than 100 residues), or do not exhibit neither a functional motif nor a significant sequence similarity in the reference databases (). 
Such cases, referred to as "ORFans" represent a large proportion of predicted microbial proteomes () in particular for large viruses (;;;Abergel & Claverie, 2020;). The validation of ORFans is important to document the intriguing evolutionary process of de novo gene creations from non-coding regions in prokaryotes, eukaryotes, and their viruses (;McLysaght & Guerzoni, 2015;Schltterer, 2015;Schmitz & Bornberg-Bauer, 2017;Van Oss & Carvunis, 2019). If the experimental validation of predicted proteins through mass-spectrometry has become easier, the technique remains inaccessible to many of the laboratories generating genomic data. It also requires the corresponding microorganisms to be isolated and cultivated, thus disregarding the increasing number of metagenomics assemblies (). Furthermore, certain proteins might only be expressed (and experimentally detectable) at specific time in the life cycle of an organisms, in certain environmental conditions, or in specific organs. Thus, our capacity of experimentally demonstrating the actual existence of predicted proteins has fallen much behind the overwhelming production of genomic data. To overcome this difficulty it has become customary to compute the selection pressure, i.e. the ratio of the synonymous vs. non-synonymous mutation rates as a way to validate bioinformatic protein predictions (e.g.: : ; Prabh & Rdelsperger, 2016); ). The concept/calculation of the selection pressure is based on the fact that proteins are made for a purpose and that their functions, directly derived from their amino-acid sequences, tend to be conserved throughout evolution. Accordingly, we expect that the non-synonymous positions of their coding regions will vary much less rapidly than the synonymous ones, the changes of which have lesser consequence on the organism's fitness. The concept of selection pressure, was most widely disseminated via the CODEML program of the PAML package for phylogenetic analysis. 
The computation requires the disposal of at least two homologous ORF sequences, and involves five straightforward steps: 1) from the comparison with the associated aminoacid sequence, each position in the ORF nucleotide sequence is classified as synonymous or non-synonymous in reference to the degeneracy of the genetic code. Their respective total numbers are denoted NS and NNS; 2) the two homologous amino-acid sequences are optimally aligned, then codon-wise converted into a nucleotide sequence alignment; 3) the observed nucleotide changes associated to the positions previously mapped as synonymous or non-synonymous are separately counted and are denoted nS and nNS; 4) one then forms the ratios dN= nNS/NNS and dS= nS/NS, separately quantifying the mutation rates at the two different types of positions; 5) finally, one compute the "selection pressure" as the ratio = dN/dS. The values of are intuitively interpreted as follows: < 1 will correspond to proteins the (beneficial) functions of which resist amino-acid changes, also said to evolve under negative (purifying) selection. This is by far the most frequent situation. In contrast, > 1 correspond to the less frequent cases where changes in protein are positively selected (i.e. adaptive evolution) either to modify or abolish its (detrimental) function. Although conceptually simple, the practical implementation of this analysis comes up against two contradictory constraints. The first is that it must be based on an alignment of impeccable quality, and therefore between two highly similar protein sequences. The second is that the number of substitutions must be sufficiently high, while keeping the probability of multiple substitutions at the same site negligible (which would distort the estimate of ds and dns). To our knowledge, the validity range of the method was never rigorously defined in terms of pair-wise sequence divergence (i.e. 
acceptable value ranges for NS, NNS, and the dS or dN ratios), although CODEML can compute a likelihood value for a large suite of adaptive evolution models (the grasp of which is beyond the reach of most of occasional users). Fortunately, the use of CODEML remains easily tractable if we only wish to compute from the pairwise alignment of two homologous ORFs in order to evaluate the quality of ab initio protein prediction, as presented in the next section. Methods For actual proteins, the non-synonymous and synonymous positions of the coding regions are expected to diverge at different speeds, thus leading to ≠1 in most cases. In contrast, in the case of false protein (ORF) predictions, the bioinformatic distinction made between non-synonymous and synonymous positions becomes irrelevant, and both types of positions are no longer expected to display a different mutational behavior. We then expect to remain close to one, within the range of random fluctuations. As the non-synonymous and synonymous positions are two mutually exclusive categories, we can evaluate how much both positions behave differently using Fisher's exact test in the analysis of the 2x2 contingency table computed from the pairwise alignment of two homologous protein predictions, as follows: Position type # Substituted # Non-substituted Non-synonymous nNS NNS-nNS Synonymous nS NS-nS Where nNS and nS are computed as the products dN.NNS and dS.NS, respectively. These values are directly read from the standard CODEML output, then rounded to the nearest integers to be compatible with Fisher's test. The probability (p-value) that both position types (synonymous and nonsynonymous) behave differently (hence that the ORFS prediction are dubious) can be calculated by any available implementation of Fisher's test (online or in R, for instance). 
The pairwise sequence alignments were analyzed using the PAML 4.9j package version for UNIX/Linux with the following relevant options: noisy=0; verbose=0; runmode=-2 (pairwise); seqtype = 1 (codons); CodonFreq=2; model=1; NSsites=0; icode=0 (universal code); fix_kappa=1; kappa=1; fix_omega=0 (omega to be estimated); omega=0.5. We apply the above procedure to the evaluation of the whole predicted proteomes of two virus sequenced in our laboratory, constituting the only two known members of the proposed Molliviridae giant virus family. The prototype of the family, Mollivirus sibericum was isolated from ancient Siberian permafrost () while the second member, Mollivirus kamchatka, was isolated from surface soil in Kamtchatka (Christo-Foroux, et al., 2020). Both are "giant" DNA viruses infecting the protozoan Acanthamoeba. A stringent gene annotation of M. sibericum was initially performed using transcriptomic data (stranded RNA-seq reads) in addition to the standard protein-coding prediction methods (). M. kamchatka proteome prediction (Christo-Foroux, et al., 2020) was performed without RNA-seq data but taking into account protein similarity with M. sibericum. Gene predictions were further curated using the web-based genomic annotation editing platform Web Apollo (). The selection pressure analysis was performed using CODEML as previously described (Christo-Foroux, et al., 2020). Finally, the codon adaptation index (CAI) of both mollivirus predicted proteomes was performed using the CAI tool from the Emboss package (). Results A total of 495 and 480 genes were predicted for M. sibericum and M. kamchatka, with the encoded proteins ranging from 51 to 2,171 residues and from 57 to 2,176 residues, respectively (Christo-Foroux, et al., 2020). 
While the two isolates are very close to each other, sharing 463 of their predicted proteins as best reciprocal matches (with 92% identical residues on average, using BlastP), they are also very different from all other known organisms with 60% of their predicted proteins lacking a detectable Validating protein prediction with Fisher's exact test homolog among cellular organisms or previously sequenced viruses (outside of the proposed Molliviridae family). These ORFan-rich proteomes constitute an ideal test set for our proposed selection pressure-based validation procedure to distinguish proteins that are actually made from ORFs that might be conserved by chance between evolutionary close viruses. Figure 1 displays the selection pressure values computed for all pairs of ORFans (panel A) and non-ORFans (panel B). For comparison the exact same pairs of genes were also displayed in association to more traditional parameters such as their length, CAI, and (G+C) %. In all graphs, ORF pairs associated to non-significant Fisher's test p-values (thus less likely to correspond to actual proteins) are indicated by red dots. Fig. 1. Selection pressure values and other parameters associated to ORFans vs non-ORFans predicted Mollivirus proteins. Each dot corresponds to a pair of orthologous genes, the relative genomic position of which is indicated by the X-axis, separately for each column. The left/right columns correspond to ORFans/non-ORFans, respectively. ORFs associated to values not significantly different from 1 are in red (p-value >1/495, allowing for one false positive), others are in blue. Table 1 provides some of the numerical values distinguishing the ORFan vs. non ORFans gene populations as well as those associated to values non-significantly or significantly different from 1 (i.e. "dubious" vs. "confirmed" protein candidates), as displayed in Fig. 1. The most discriminant pattern in Fig. 1 is the larger proportion of red dots in the left columns. 
ORFans are more frequently associated to values non-significantly departing from one (68/190, 36%) than non-ORFans (15/125, 12%). This suggests that more than a third of predicted ORFans might not correspond to actual proteins. Yet, this result also shows that our testing procedure provides confirmations for all the others (64%)(Table 1). The much larger proportion of blue dots for the non-ORFans, confirms that the detection of homologs (even in very close species) is a reliable way to assess the reality of predicted proteins. However, our results indicate that 12% of them might be undergoing pseudogenization, despite appearing to remain under negative selection ( values <0.6, Fig. 1B). The utility of our value testing procedure is best illustrated by the combined consultation of Fig.1 Fig. 1 (A & B) shows that no unique minimal value threshold can be used to cleanly separate the two populations. Similarly, the predicted protein length distribution are significantly different between the red and blue dots (Table 1). However, if smaller ORF predictions are clearly less reliable, no clear length threshold could separate both distributions ( Fig.1 C & D). Finally, the computations of the CAI (Fig. 1, E & F) or G+C content (Fig. 1, G & H) do not bring in usable information to discriminate between reliable or unreliable protein predictions, given the very similar value distributions of these parameters for the red and blue dot populations. We started this work by noting that is defined as the ratio of two small quantities (dN/dS) themselves computed from a limited number of substitution events (ns, nNS) imposed by the necessity of flawless pairwise sequence alignments. Values of are thus highly sensitive to random fluctuations making them unreliable to assess the validity of protein prediction. 
We showed here that applying Fisher's exact test to the standard CODEML output provides a simple way to improve the reliability and predictive power of selection pressure computations. This procedure might thus constitute a useful addition to the standard genome annotation pipeline and previously proposed software tools to help identify spurious ORFs (;). As a side benefit, the use of Fisher's test automatically filters out pairwise alignments that do not exhibit enough substitutions because they are too similar, or their alignments too short. The only parameter remaining to be fixed is the percentage of identical residues between orthologous proteins, which should be greater than 70% (usually by imposing dS<2) to ensure high-quality pairwise alignments and minimize the probability of multiple substitutions at one given site. 
Si/SiGe Epitaxy: a Ubiquitous Process for Advanced Electronics This contribution is focusing on low temperature epitaxy of SiGe alloys that are required in advanced devices. In a first part, we give a certain background on RTCVD and SiGe(C) materials. In a second step, we develop some specific applications important and fundamental in our technologies: selective epitaxy of SiGeC for bipolar base and Si/SiGe epitaxies for the fabrication of thin films CMOS. In each case, we present major improvements of the process capabilities or innovative structures. And finally, we propose the association of Si/SiGe epi and SiGe selective etch as an effective way to fabricate objects at the nano-scale. |
<filename>src/main/java/es/tid/rsvp/objects/SenderTSpec.java<gh_stars>0
package es.tid.rsvp.objects;
/**
 * A.11 SENDER_TSPEC Class
 *
 * SENDER_TSPEC class = 12.
 *
 * Abstract base class for RSVP SENDER_TSPEC objects; concrete subclasses
 * supply the C-Type specific payload encoding/decoding.
 * @author mcs
 *
 */
public abstract class SenderTSpec extends RSVPObject{

    /**
     * Creates an empty SENDER_TSPEC object header.
     */
    public SenderTSpec(){
        // RFC 2205 A.11: SENDER_TSPEC class number is 12
        classNum = 12;
    }

    /**
     * Creates a SENDER_TSPEC object from its wire encoding.
     * @param bytes buffer holding the encoded object
     * @param offset start of the object inside the buffer
     */
    public SenderTSpec(byte[] bytes, int offset){
        // Fix: this constructor previously left classNum unset, which was
        // inconsistent with the default constructor; the class number is a
        // constant (12) regardless of how the object is created.
        classNum = 12;
        // Concrete subclasses are expected to perform the actual decode().
    }
}
|
#include <bits/stdc++.h>
#define mp make_pair
#define f1 first
#define f2 second
#define int long long
#define pb push_back
#define pii pair<int ,int>
#define ios ios_base::sync_with_stdio(0);cin.tie(0);cout.tie(0);
using namespace std;
const int mox=2e5+9;      // array capacity (note: "int" is #defined to long long)
int na[mox];              // the input values a[i]
vector<int> v[mox];       // reversed edges: v[j] holds i whenever i +/- na[i] == j
bool visit[mox][2];       // visit[i][p]: node i already reached in the parity-p BFS
int h[mox][2];            // h[i][p]: BFS distance from any parity-p source (0x3f-filled = "infinity")
queue<int> q[2];          // BFS frontiers, one per parity class (0 = even values, 1 = odd)
// Multi-source BFS over the reversed jump graph for parity class x
// (0 = even-valued sources, 1 = odd-valued sources).  The sources were
// pre-seeded into q[x] with h[source][x] = 0; every reachable node is
// relaxed exactly once via the visit[][] flags.
void f(int x){
    while(!q[x].empty()){
        const int cur = q[x].front();
        q[x].pop();
        for(const int nxt : v[cur]){
            if(visit[nxt][x]) continue;
            visit[nxt][x] = 1;
            h[nxt][x] = h[cur][x] + 1;
            q[x].push(nxt);
        }
    }
}
// For every index i, print the minimum number of moves (i -> i + a[i] or
// i -> i - a[i], staying inside [0, n)) needed to land on a position whose
// value has the opposite parity of a[i], or -1 if unreachable.  Solved as a
// multi-source BFS on the reversed move graph, once per parity class.
// Note: "int" is #defined to long long, so this is long long main()
// (accepted as a GNU extension).
main(){
    int n;
    cin>>n;
    // Fill h with byte 0x3f: 0x3f3f3f3f3f3f3f3f == 4557430888798830399,
    // used below as the "unreached" sentinel.
    memset(h,63,sizeof h);
    for(int i=0;i<n;i++){
        cin>>na[i];
        // Store the move i -> i+na[i] (resp. i-na[i]) as a REVERSED edge so
        // the BFS can expand from the targets back to the sources.
        if(i+na[i]<n){
            v[i+na[i]].pb(i);
        }
        if(i-na[i]>=0){
            v[i-na[i]].pb(i);
        }
        // Seed the BFS queues: each index is a distance-0 source for the
        // parity class of its own value.
        if(na[i]%2){
            q[1].push(i);
            visit[i][1]=1;
            h[i][1]=0;
        }
        else{
            q[0].push(i);
            visit[i][0]=1;
            h[i][0]=0;
        }
    }
    f(0);  // distances to the nearest even-valued index
    f(1);  // distances to the nearest odd-valued index
    for(int i=0;i<n;i++){
        // An even value must reach an odd one, and vice versa.
        if(na[i]%2==0){
            if(h[i][1]==4557430888798830399){
                cout<<-1<<" ";
            }
            else{
                cout<<h[i][1]<<" ";
            }
        }
        else{
            if(h[i][0]==4557430888798830399){
                cout<<-1<<" ";
            }
            else{
                cout<<h[i][0]<<" ";
            }
        }
    }
}
Disk dwarf galaxy as the progenitor of the Andromeda giant stream Abstract We present a study of the morphology of a progenitor galaxy that has been disrupted and formed a giant southern stellar stream in the halo of the Andromeda galaxy (M31). N-body simulations of a minor merger of M31 with a dwarf galaxy suggest that the progenitor's rotation plays an important role in the formation of an asymmetric surface brightness distribution of the stream. 
package com.iot.noisemap.noiserecorder;
import android.Manifest;
import android.animation.Animator;
import android.animation.AnimatorListenerAdapter;
import android.annotation.TargetApi;
import android.app.LoaderManager.LoaderCallbacks;
import android.content.CursorLoader;
import android.content.Loader;
import android.content.SharedPreferences;
import android.content.pm.PackageManager;
import android.database.Cursor;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.Build;
import android.os.Bundle;
import android.provider.ContactsContract;
import android.support.v4.app.ActivityCompat;
import android.support.v7.app.AppCompatActivity;
import android.text.TextUtils;
import android.util.Log;
import android.view.KeyEvent;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.inputmethod.EditorInfo;
import android.widget.ArrayAdapter;
import android.widget.AutoCompleteTextView;
import android.widget.Button;
import android.widget.EditText;
import android.widget.TextView;
import android.widget.Toast;
import com.android.volley.DefaultRetryPolicy;
import com.android.volley.Request;
import com.android.volley.RequestQueue;
import com.android.volley.toolbox.StringRequest;
import com.android.volley.toolbox.Volley;
import com.iot.noisemap.noiserecorder.network.rest.RestCallFactory;
import org.json.JSONObject;
import java.security.InvalidParameterException;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Pattern;
/**
* A login screen that offers login via username/password.
*/
public class LoginActivity extends AppCompatActivity implements LoaderCallbacks<Cursor> {
// Request code used when asking for the location + microphone permissions.
private final static int ACCESS_LOCATION_AND_RECORD_AUDIO_REQUEST_CODE = 1;

/**
 * Keep track of the login task to ensure we can cancel it if requested.
 */
private UserLoginTask authTask = null;
// Background task performing the registration request (null while idle).
private RegisterUserTask registerUserTask = null;

// UI references.
private AutoCompleteTextView usernameView;
private EditText passwordView;
private EditText confirmPasswordView;
private View progressView;
private View loginFormView;
// Volley request queue used for the REST calls issued by this activity.
private RequestQueue requestQueue;
/**
 * Creates the activity: requests the runtime permissions the app needs,
 * wires up the login form (keyboard action, sign-in/register buttons) and
 * restores previously saved credentials into the form.
 * @param savedInstanceState saved state.
 */
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    this.setTitle("Login");
    setContentView(R.layout.activity_login);
    // One shared Volley queue for all REST requests of this activity.
    requestQueue = Volley.newRequestQueue(this);
    // Ask for location + microphone up front; the outcome is handled in
    // onRequestPermissionsResult (the activity finishes if denied).
    ActivityCompat.requestPermissions(LoginActivity.this,
            new String[]{android.Manifest.permission.ACCESS_FINE_LOCATION,
                    Manifest.permission.RECORD_AUDIO},
            ACCESS_LOCATION_AND_RECORD_AUDIO_REQUEST_CODE);
    // Set up the login form.
    usernameView = (AutoCompleteTextView) findViewById(R.id.email);
    passwordView = (EditText) findViewById(R.id.password);
    confirmPasswordView = (EditText) findViewById(R.id.confirmPassword);
    // Pressing "done" on the soft keyboard triggers a login attempt.
    passwordView.setOnEditorActionListener(new TextView.OnEditorActionListener() {
        @Override
        public boolean onEditorAction(TextView textView, int id, KeyEvent keyEvent) {
            if (id == EditorInfo.IME_ACTION_DONE || id == EditorInfo.IME_NULL) {
                attemptLogin();
                return true;
            }
            return false;
        }
    });
    // load user data
    loadUserData();
    final Button btnSignIn = (Button) findViewById(R.id.btn_SignIn);
    final Button btnRegister = (Button) findViewById(R.id.btn_register);
    // Both buttons disable themselves before starting a request — this
    // appears intended to prevent double submits while it is in flight.
    btnSignIn.setOnClickListener(new OnClickListener() {
        @Override
        public void onClick(View view) {
            btnSignIn.setEnabled(false);
            btnRegister.setEnabled(false);
            attemptLogin();
        }
    });
    btnRegister.setOnClickListener(new OnClickListener() {
        @Override
        public void onClick(View view) {
            btnSignIn.setEnabled(false);
            btnRegister.setEnabled(false);
            attemptRegister();
        }
    });
    loginFormView = findViewById(R.id.login_form);
    progressView = findViewById(R.id.login_progress);
}
/**
 * Restores the previously saved username and password from the "UserData"
 * shared preferences into the form fields (empty strings if none saved).
 */
private void loadUserData() {
    final SharedPreferences prefs = getSharedPreferences("UserData", 0);
    final String savedUsername = prefs.getString("Username", "");
    final String savedPassword = prefs.getString("Password", "");
    usernameView.setText(savedUsername);
    passwordView.setText(savedPassword);
}
/**
 * Receives the outcome of the runtime permission request issued in
 * onCreate. The app cannot operate without location and microphone
 * access, so a denial closes the activity.
 * @param requestCode Which request?
 * @param permissions What permission?
 * @param grantResults Grant result
 */
@Override
public void onRequestPermissionsResult(int requestCode, String permissions[], int[] grantResults) {
    if (requestCode != ACCESS_LOCATION_AND_RECORD_AUDIO_REQUEST_CODE) {
        return;
    }
    final boolean granted =
            grantResults.length > 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED;
    if (!granted) {
        Toast.makeText(this, "App needs those permissions to work!", Toast.LENGTH_LONG).show();
        finish();
    }
}
/**
 * Attempts to register the user at the server.
 *
 * Expected payload for POST .../api/Account/Register:
 * {
 *   "Username": "&lt;xyz&gt;",
 *   "Password": "&lt;pw&gt;",
 *   "ConfirmPassword": "&lt;pw&gt;"
 * }
 *
 * Validates the form first; on a validation error the offending field is
 * focused and the buttons (disabled by the click listener) are re-enabled.
 */
private void attemptRegister() {
    View focusView = null;
    boolean cancel = false;
    final Button btnSignIn = (Button) findViewById(R.id.btn_SignIn);
    final Button btnRegister = (Button) findViewById(R.id.btn_register);
    // Fix: the buttons used to be unconditionally re-enabled right here,
    // which defeated the double-submit guard set by the click listener and
    // was inconsistent with attemptLogin(). They are now re-enabled only
    // when validation fails.

    // Store values at the time of the register attempt.
    String email = usernameView.getText().toString();
    String password = passwordView.getText().toString();
    String confirmPassword = confirmPasswordView.getText().toString();

    // Check for a valid password, if the user entered one.
    if (!TextUtils.isEmpty(password) && !isPasswordValid(password)) {
        passwordView.setError(getString(R.string.error_invalid_password));
        focusView = passwordView;
        cancel = true;
    }
    // Robustness: both password fields must agree before calling the server.
    if (!password.equals(confirmPassword)) {
        confirmPasswordView.setError("Passwords do not match");
        focusView = confirmPasswordView;
        cancel = true;
    }
    // Check for a valid email address.
    if (TextUtils.isEmpty(email)) {
        usernameView.setError(getString(R.string.error_field_required));
        focusView = usernameView;
        cancel = true;
    } else if (!isEmailValid(email)) {
        usernameView.setError(getString(R.string.error_invalid_email));
        focusView = usernameView;
        cancel = true;
    }

    if (cancel) {
        // There was an error; don't attempt registration and focus the
        // first form field with an error.
        focusView.requestFocus();
        btnSignIn.setEnabled(true);
        btnRegister.setEnabled(true);
    } else {
        // Show a progress spinner, and kick off a background task to
        // perform the registration attempt.
        showProgress(true);
        registerUserTask = new RegisterUserTask(email, password, confirmPassword);
        registerUserTask.execute((Void) null);
    }
}
/**
* Attempts to sign in the account specified by the login form.
* If there are form errors (invalid email, missing fields, etc.), the
* errors are presented and no actual login attempt is made.
*/
private void attemptLogin() {
final Button btnSignIn = (Button) findViewById(R.id.btn_SignIn);
final Button btnRegister = (Button) findViewById(R.id.btn_register);
if (authTask != null) {
btnSignIn.setEnabled(true);
btnRegister.setEnabled(true);
return;
}
// Reset errors.
usernameView.setError(null);
passwordView.setError(null);
// Store values at the time of the login attempt.
String email = usernameView.getText().toString();
String password = <PASSWORD>();
boolean cancel = false;
View focusView = null;
// Check for a valid password, if the user entered one.
if (!TextUtils.isEmpty(password) && !isPasswordValid(password)) {
passwordView.setError(getString(R.string.error_invalid_password));
focusView = passwordView;
cancel = true;
}
// Check for a valid email address.
if (TextUtils.isEmpty(email)) {
usernameView.setError(getString(R.string.error_field_required));
focusView = usernameView;
cancel = true;
} else if (!isEmailValid(email)) {
usernameView.setError(getString(R.string.error_invalid_email));
focusView = usernameView;
cancel = true;
}
if (cancel) {
// There was an error; don't attempt login and focus the first
// form field with an error.
focusView.requestFocus();
btnSignIn.setEnabled(true);
btnRegister.setEnabled(true);
} else {
// Show a progress spinner, and kick off a background task to
// perform the user login attempt.
showProgress(true);
authTask = new UserLoginTask(email, password);
authTask.execute((Void) null);
}
}
/**
 * Checks if the email is valid.
 * @param email Email to check.
 * @return Email valid?
 */
private boolean isEmailValid(String email) {
    // http://emailregex.com/
    // FIX: the pattern only spells out [a-z] ranges, so without CASE_INSENSITIVE
    // any address containing an uppercase letter was rejected.
    Pattern isEmailValidPattern = Pattern.compile("(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|\"(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21\\x23-\\x5b\\x5d-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])*\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21-\\x5a\\x53-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])+)\\])", Pattern.CASE_INSENSITIVE);
    return isEmailValidPattern.matcher(email).matches();
}
/**
 * Checks if the password is valid.
 * @param password Password to check.
 * @return Password valid? (true when it is at least 4 characters long)
 */
private boolean isPasswordValid(String password) {
    // Minimum length is the only rule enforced client-side.
    return password.length() >= 4;
}
/**
 * Shows the progress UI and hides the login form.
 *
 * @param show true to show the progress spinner and hide the form,
 *             false to restore the form and hide the spinner.
 */
@TargetApi(Build.VERSION_CODES.HONEYCOMB_MR2)
public void showProgress(final boolean show) {
    // On Honeycomb MR2 we have the ViewPropertyAnimator APIs, which allow
    // for very easy animations. If available, use these APIs to fade-in
    // the progress spinner.
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB_MR2) {
        int shortAnimTime = getResources().getInteger(android.R.integer.config_shortAnimTime);
        // Fade the login form toward its target alpha, and pin the final
        // visibility once the animation completes.
        loginFormView.setVisibility(show ? View.GONE : View.VISIBLE);
        loginFormView.animate().setDuration(shortAnimTime).alpha(
                show ? 0 : 1).setListener(new AnimatorListenerAdapter() {
            @Override
            public void onAnimationEnd(Animator animation) {
                loginFormView.setVisibility(show ? View.GONE : View.VISIBLE);
            }
        });
        // Fade the progress spinner in the opposite direction.
        progressView.setVisibility(show ? View.VISIBLE : View.GONE);
        progressView.animate().setDuration(shortAnimTime).alpha(
                show ? 1 : 0).setListener(new AnimatorListenerAdapter() {
            @Override
            public void onAnimationEnd(Animator animation) {
                progressView.setVisibility(show ? View.VISIBLE : View.GONE);
            }
        });
    } else {
        // The ViewPropertyAnimator APIs are not available, so simply show
        // and hide the relevant UI components.
        progressView.setVisibility(show ? View.VISIBLE : View.GONE);
        loginFormView.setVisibility(show ? View.GONE : View.VISIBLE);
    }
}
@Override
public Loader<Cursor> onCreateLoader(int i, Bundle bundle) {
    // Query the device owner's profile contact for email rows; the results
    // feed the email autocomplete via onLoadFinished.
    return new CursorLoader(this,
            // Retrieve data rows for the device user's 'profile' contact.
            Uri.withAppendedPath(ContactsContract.Profile.CONTENT_URI,
                    ContactsContract.Contacts.Data.CONTENT_DIRECTORY), ProfileQuery.PROJECTION,
            // Select only email addresses.
            ContactsContract.Contacts.Data.MIMETYPE +
                    " = ?", new String[]{ContactsContract.CommonDataKinds.Email
                    .CONTENT_ITEM_TYPE},
            // Show primary email addresses first. Note that there won't be
            // a primary email address if the user hasn't specified one.
            ContactsContract.Contacts.Data.IS_PRIMARY + " DESC");
}
@Override
public void onLoadFinished(Loader<Cursor> cursorLoader, Cursor cursor) {
    // Collect every email address delivered by the profile query, then hand
    // the list to the autocomplete adapter. The loader owns the cursor, so
    // it is not closed here.
    List<String> emails = new ArrayList<>();
    for (cursor.moveToFirst(); !cursor.isAfterLast(); cursor.moveToNext()) {
        emails.add(cursor.getString(ProfileQuery.ADDRESS));
    }
    addEmailsToAutoComplete(emails);
}
@Override
public void onLoaderReset(Loader<Cursor> cursorLoader) {
    // Nothing to release: onLoadFinished copies the addresses out of the
    // cursor, so no reference to the loader's data is retained.
}
/**
 * Adds the auto complete for the email.
 * @param emailAddressCollection List of email addresses.
 */
private void addEmailsToAutoComplete(List<String> emailAddressCollection) {
    // Back the AutoCompleteTextView's dropdown with the collected addresses.
    final ArrayAdapter<String> emailAdapter = new ArrayAdapter<>(
            LoginActivity.this,
            android.R.layout.simple_dropdown_item_1line,
            emailAddressCollection);
    usernameView.setAdapter(emailAdapter);
}
// Column projection and matching indices for the profile email query.
private interface ProfileQuery {
    String[] PROJECTION = {
            ContactsContract.CommonDataKinds.Email.ADDRESS,
            ContactsContract.CommonDataKinds.Email.IS_PRIMARY,
    };
    // Indices into PROJECTION above — keep in sync with the array order.
    int ADDRESS = 0;
    int IS_PRIMARY = 1;
}
// Re-enables both form buttons after a task finishes or is cancelled.
private void enableButtons() {
    ((Button) findViewById(R.id.btn_SignIn)).setEnabled(true);
    ((Button) findViewById(R.id.btn_register)).setEnabled(true);
}
/**
 * Represents an asynchronous login task used to authenticate the user.
 */
public class UserLoginTask extends AsyncTask<Void, Void, Boolean> {
    private static final String TAG = "UserLoginTask";
    private final String TOKEN_URL = Config.HOST_BASE_URL + "token";
    // Credentials captured on the UI thread at construction time.
    private final String email;
    private final String password;

    UserLoginTask(String email, String password) {
        this.email = email;
        this.password = password;
    }

    @Override
    protected Boolean doInBackground(Void... params) {
        // FIX: use the credentials captured in the constructor instead of
        // re-reading the views here — views must not be touched off the UI
        // thread, and the constructor arguments were otherwise dead weight.
        StringRequest tokenRequest = RestCallFactory.createApiTokenRequest(email, password, TOKEN_URL, LoginActivity.this);
        // increase accepted timeout duration, because the azure web api seems to go into a
        // standby-ish mode when it gets no request for some time
        int acceptedTimeoutMs = 15000;
        tokenRequest.setRetryPolicy(new DefaultRetryPolicy(
                acceptedTimeoutMs,
                DefaultRetryPolicy.DEFAULT_MAX_RETRIES,
                DefaultRetryPolicy.DEFAULT_BACKOFF_MULT));
        requestQueue.add(tokenRequest);
        return true;
    }

    @Override
    protected void onPostExecute(final Boolean success) {
        authTask = null;
        if (success) {
            // finish();
        } else {
            passwordView.setError(getString(R.string.error_incorrect_password));
            passwordView.requestFocus();
        }
        enableButtons();
    }

    @Override
    protected void onCancelled() {
        authTask = null;
        showProgress(false);
        enableButtons();
    }
}
/**
* Represents an asynchronous registration task used to register the user.
*/
public class RegisterUserTask extends AsyncTask<Void, Void, Boolean> {
private static final String TAG = "UserRegisterTask";
private final String TOKEN_URL = Config.API_BASE_URL + "Account/Register";
private final String username;
private final String password;
private final String confirmPassword;
RegisterUserTask(String email, String password, String confirmPassword) {
this.username = email;
this.password = password;
this.confirmPassword = <PASSWORD>;
}
@Override
protected Boolean doInBackground(Void... params) {
Request<JSONObject> registerUserRequest = null;
try {
registerUserRequest = RestCallFactory.createRegisterUserRequest(username, password, confirmPassword, TOKEN_URL, LoginActivity.this);
} catch (InvalidParameterException e) {
Log.e("RegisterUserTask","invalid parameters"); // username, password or confirmationPassword == null
}
// increase accepted timeout duration, because the azure web api seems to go into a
// standby-ish mode when it gets no request for some time
int acceptedTimeoutMs = 15000;
registerUserRequest.setRetryPolicy(new DefaultRetryPolicy(
acceptedTimeoutMs,
DefaultRetryPolicy.DEFAULT_MAX_RETRIES,
DefaultRetryPolicy.DEFAULT_BACKOFF_MULT));
requestQueue.add(registerUserRequest);
return true;
}
@Override
protected void onPostExecute(final Boolean success) {
authTask = null;
if (success) {
// finish();
} else {
passwordView.setError(getString(R.string.error_incorrect_password));
passwordView.requestFocus();
}
enableButtons();
}
@Override
protected void onCancelled() {
authTask = null;
showProgress(false);
enableButtons();
}
}
}
|
<filename>jnode-core/test/java/jnode/report/ConnectionStatDataTest.java
/*
* Licensed to the jNode FTN Platform Develpoment Team (jNode Team)
* under one or more contributor license agreements.
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
* The jNode Team licenses this file to you under the
* Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package jnode.report;
import jnode.ftn.types.FtnAddress;
import junit.framework.TestCase;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.List;
/**
 * Unit tests for {@link ConnectionStatData}: loading, load-and-drop,
 * storing by address, and position lookup.
 *
 * @author Manjago (<EMAIL>)
 */
public class ConnectionStatDataTest {
    private String path;   // temp file path used as the stat store
    private ConnectionStatData.ConnectionStatDataElement e1;
    private ConnectionStatData.ConnectionStatDataElement e2;

    @Before
    public void setUp() throws Exception {
        // Reserve a unique temp path, then delete the file so each test
        // starts from a non-existing store.
        File file = File.createTempFile("test", ".xml");
        path = file.getAbsolutePath();
        file.delete();
        e1 = new ConnectionStatData.ConnectionStatDataElement();
        e1.bytesReceived = 1;
        e1.bytesSended = 2;
        e1.incomingFailed = 3;
        e1.incomingOk = 4;
        e1.linkStr = "2:5020/828.17";
        e1.outgoingFailed = 5;
        e1.outgoingOk = 6;
        e2 = new ConnectionStatData.ConnectionStatDataElement();
        e2.bytesReceived = 11;
        e2.bytesSended = 21;
        e2.incomingFailed = 31;
        e2.incomingOk = 41;
        e2.linkStr = "2:5020/828.18";
        e2.outgoingFailed = 51;
        e2.outgoingOk = 61;
    }

    @After
    public void tearDown() throws Exception {
        new File(path).delete();
    }

    /** Copies the bundled testload.xml fixture to the temp store path. */
    private void copyTestResource() throws Exception {
        URL url = ConnectionStatDataTest.class.getResource("testload.xml");
        Path src = new File(url.getPath()).toPath();
        Path dest = new File(path).toPath();
        Files.copy(src, dest, StandardCopyOption.REPLACE_EXISTING);
    }

    @Test
    public void testLoad() throws Exception {
        copyTestResource();
        ConnectionStatData data = new ConnectionStatData(path);
        List<ConnectionStatData.ConnectionStatDataElement> loaded = data.load();
        check(loaded);
    }

    @Test
    public void testLoadAndDrop() throws Exception {
        copyTestResource();
        ConnectionStatData data = new ConnectionStatData(path);
        List<ConnectionStatData.ConnectionStatDataElement> loaded = data.loadAndDrop();
        check(loaded);
        // After loadAndDrop the store must be empty.
        loaded = data.load();
        TestCase.assertNotNull(loaded);
        TestCase.assertEquals(0, loaded.size());
    }

    @Test
    public void testStore() throws Exception {
        ConnectionStatData data = new ConnectionStatData(path);
        data.store(new FtnAddress("2:5020/828.17"), e1);
        data.store(new FtnAddress("2:5020/828.18"), e2);
        List<ConnectionStatData.ConnectionStatDataElement> loaded = data.load();
        check(loaded);
    }

    @Test
    public void testStore2() throws Exception {
        // A null address/linkStr must still be stored and round-trip as null.
        ConnectionStatData data = new ConnectionStatData(path);
        data.store(new FtnAddress("2:5020/828.17"), e1);
        e2.linkStr = null;
        data.store(null, e2);
        List<ConnectionStatData.ConnectionStatDataElement> loaded = data.load();
        TestCase.assertNotNull(loaded);
        TestCase.assertEquals(2, loaded.size());
        TestCase.assertEquals(1, loaded.get(0).bytesReceived);
        TestCase.assertEquals(2, loaded.get(0).bytesSended);
        TestCase.assertEquals(3, loaded.get(0).incomingFailed);
        TestCase.assertEquals(4, loaded.get(0).incomingOk);
        TestCase.assertEquals("2:5020/828.17", loaded.get(0).linkStr);
        TestCase.assertEquals(5, loaded.get(0).outgoingFailed);
        TestCase.assertEquals(6, loaded.get(0).outgoingOk);
        TestCase.assertEquals(11, loaded.get(1).bytesReceived);
        TestCase.assertEquals(21, loaded.get(1).bytesSended);
        TestCase.assertEquals(31, loaded.get(1).incomingFailed);
        TestCase.assertEquals(41, loaded.get(1).incomingOk);
        TestCase.assertNull(loaded.get(1).linkStr);
        TestCase.assertEquals(51, loaded.get(1).outgoingFailed);
        TestCase.assertEquals(61, loaded.get(1).outgoingOk);
    }

    /** Asserts that the two fixture elements round-tripped intact. */
    private void check(List<ConnectionStatData.ConnectionStatDataElement> loaded) {
        TestCase.assertNotNull(loaded);
        TestCase.assertEquals(2, loaded.size());
        TestCase.assertEquals(1, loaded.get(0).bytesReceived);
        TestCase.assertEquals(2, loaded.get(0).bytesSended);
        TestCase.assertEquals(3, loaded.get(0).incomingFailed);
        TestCase.assertEquals(4, loaded.get(0).incomingOk);
        TestCase.assertEquals("2:5020/828.17", loaded.get(0).linkStr);
        TestCase.assertEquals(5, loaded.get(0).outgoingFailed);
        TestCase.assertEquals(6, loaded.get(0).outgoingOk);
        TestCase.assertEquals(11, loaded.get(1).bytesReceived);
        TestCase.assertEquals(21, loaded.get(1).bytesSended);
        TestCase.assertEquals(31, loaded.get(1).incomingFailed);
        TestCase.assertEquals(41, loaded.get(1).incomingOk);
        TestCase.assertEquals("2:5020/828.18", loaded.get(1).linkStr);
        TestCase.assertEquals(51, loaded.get(1).outgoingFailed);
        TestCase.assertEquals(61, loaded.get(1).outgoingOk);
    }

    @Test
    public void testFindPos() throws Exception {
        ConnectionStatData data = new ConnectionStatData(path);
        List<ConnectionStatData.ConnectionStatDataElement> d = new ArrayList<>();
        d.add(e1);
        d.add(e2);
        int pos = data.findPos(new FtnAddress("2:5020/828.17"), d);
        TestCase.assertEquals(0, pos);
        pos = data.findPos(new FtnAddress("2:5020/828.18"), d);
        TestCase.assertEquals(1, pos);
        // Unknown address yields -1.
        pos = data.findPos(new FtnAddress("2:5020/828.19"), d);
        TestCase.assertEquals(-1, pos);
    }
}
|
/** Return a measure of case insensitive similarity between just the alphabetic portion
* of two authorship strings, ignoring commas, spaces, numbers, punctuation, and
* parentheses, in a range of 0 (no similarity) to 1 (no difference),
* using a measure of the string edit distance scaled to the length
* difference of the two strings.
*
* @param anAuthor one authorship string
* @param toOtherAuthor the second authorship string to make the comparason with.
* @return a double in the range 0 to 1 where 0 is no similarity and 1 is an exact match.
*/
public static double calulateSimilarityOfAuthorAlpha(String anAuthor, String toOtherAuthor) {
String au = toOtherAuthor.toLowerCase().replaceAll("[^A-Za-z]", "");
String au1 = anAuthor.toLowerCase().replaceAll("[^A-Za-z]", "");
return AuthorNameComparator.stringSimilarity(au, au1);
} |
# Football cards (Python 2): read the home and away team names, then a list of
# card events, and print the moment each player first deserves a red card.
teams=[raw_input(),raw_input()]  # teams[0] = home ('h'), teams[1] = away ('a')
s=set()      # players (team letter + shirt number) holding one yellow card
first=set()  # players already sent off; only the first red card is reported
for i in range(input()):
    time,team,num,card=raw_input().split()
    if team+num not in first:
        # A straight red card, or a second yellow, means a sending-off.
        if card=='r' or team+num in s:
            # team=='a' is True (index 1) for the away side, False (0) for home.
            print teams[team=='a'], num, time
            first.add(team+num)
        else: s.add(team+num)
Holiday Cookies!
What is it about the holiday time that makes us feel like we need to bake cookies and pies, and eat everything in sight?! Whatever the reason, it’s a great one because I love cookies and pies! This is our first holiday season eating on a keto diet so that means this is my first time making keto Christmas cookies. I wanted to keep it simple, but also make something a little different than your run of the mill Christmas cookies. That’s why I created these tasty Maple Macadamia Nut Cookies!
Maple Macadamia Nut Cookies
These cookies originally started off as just maple cookies. I had made many attempts at cookies over the past 6 months with no success. I first tried coconut flour and quickly learned that the texture is not appetizing or cookie-like. I moved on to almond flour and still had trouble with the ratios.
It wasn’t until a couple of weeks ago that I was truly determined and planned on baking as many cookies as it took to create the perfect almond flour cookies. It must have been my lucky day because I made these cookies on the first attempt! I was so excited for Matt to get home and try them.
What About Macadamia
When Matt walked through the door the first thing I said was “I created the perfect cookie!” He loves coming home to anything new I’ve attempted to bake, even if it turns out horrible, so naturally, he grabbed one and bit into it. He looked to me with an excited look in his eye and said “What about Macadamia?!” I tried another and he couldn’t have been more right. His taste buds were on point!
Macadamia nuts are steep in price, but I wanted to make them so I splurged. Not to worry, however, you can use any nut you have at home! We got a small container of them, re-made them with the nuts, and I created my favorite cookie of all time – Maple Macadamia Nut Cookies! |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.