author | date | timezone | hash | message | mods | language | license | repo | original_message |
---|---|---|---|---|---|---|---|---|---|
49,738 | 24.03.2017 19:32:08 | 25,200 | 0dd095ffb8a951ccdd52e889d3dcb4bfb2c95545 | [MINOR] Fix code generator (templates, existing ops, input ordering) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeTernary.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeTernary.java",
"diff": "@@ -43,7 +43,7 @@ public class CNodeTernary extends CNode\nreturn \" double %TMP% = %IN1% + %IN2% * %IN3%;\\n\" ;\ncase MINUS_MULT:\n- return \" double %TMP% = %IN1% - %IN2% * %IN3%;\\n;\\n\" ;\n+ return \" double %TMP% = %IN1% - %IN2% * %IN3%;\\n\" ;\ncase LOOKUP_RC1:\nreturn \" double %TMP% = %IN1%[rowIndex*%IN2%+%IN3%-1];\\n\";\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/CellTpl.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/CellTpl.java",
"diff": "package org.apache.sysml.hops.codegen.template;\nimport java.util.ArrayList;\n+import java.util.Comparator;\nimport java.util.HashMap;\nimport java.util.HashSet;\n-import java.util.LinkedList;\n+import java.util.List;\n+import java.util.stream.Collectors;\nimport org.apache.sysml.hops.AggBinaryOp;\nimport org.apache.sysml.hops.AggUnaryOp;\n@@ -98,15 +100,12 @@ public class CellTpl extends BaseTpl\nrConstructCplan(hop, memo, tmp, inHops, compileLiterals);\nhop.resetVisitStatus();\n- //reorder inputs (ensure matrices/vectors come first, prune literals)\n- LinkedList<Hop> sinHops = new LinkedList<Hop>();\n- for( int i : new int[]{0,1,2} ) //matrices, vectors, scalars\n- for( Hop h : inHops ) //matrices\n- if( (i==0 && h.getDataType().isMatrix() && !TemplateUtils.isVector(h))\n- || (i==1 && h.getDataType().isMatrix() && TemplateUtils.isVector(h))\n- || (i==2 && h.getDataType().isScalar() && !tmp.get(h.getHopID()).isLiteral())) {\n- sinHops.add(h);\n- }\n+ //reorder inputs (ensure matrices/vectors come first) and prune literals\n+ //note: we order by number of cells and subsequently sparsity to ensure\n+ //that sparse inputs are used as the main input w/o unnecessary conversion\n+ List<Hop> sinHops = inHops.stream()\n+ .filter(h -> !(h.getDataType().isScalar() && tmp.get(h.getHopID()).isLiteral()))\n+ .sorted(new HopInputComparator()).collect(Collectors.toList());\n//construct template node\nArrayList<CNode> inputs = new ArrayList<CNode>();\n@@ -115,8 +114,8 @@ public class CellTpl extends BaseTpl\nCNode output = tmp.get(hop.getHopID());\nCNodeCell tpl = new CNodeCell(inputs, output);\ntpl.setCellType(TemplateUtils.getCellType(hop));\n- tpl.setSparseSafe((HopRewriteUtils.isBinary(hop, OpOp2.MULT) && hop.getInput().contains(sinHops.getFirst()))\n- || (HopRewriteUtils.isBinary(hop, OpOp2.DIV) && hop.getInput().get(0) == sinHops.getFirst()));\n+ tpl.setSparseSafe((HopRewriteUtils.isBinary(hop, OpOp2.MULT) && hop.getInput().contains(sinHops.get(0)))\n+ || (HopRewriteUtils.isBinary(hop, OpOp2.DIV) && hop.getInput().get(0) == sinHops.get(0)));\ntpl.setRequiresCastDtm(hop instanceof AggBinaryOp);\n// return cplan instance\n@@ -278,4 +277,28 @@ public class CellTpl extends BaseTpl\n|| isBinaryMatrixScalar || isBinaryMatrixVector || isBinaryMatrixMatrixDense\n|| isTernaryVectorScalarVector || isTernaryMatrixScalarMatrixDense);\n}\n+\n+ /**\n+ * Comparator to order input hops of the cell template. We try to order\n+ * matrices-vectors-scalars via sorting by number of cells and for\n+ * equal number of cells by sparsity to prefer sparse inputs as the main\n+ * input for sparsity exploitation.\n+ */\n+ public static class HopInputComparator implements Comparator<Hop>\n+ {\n+ @Override\n+ public int compare(Hop h1, Hop h2) {\n+ long ncells1 = h1.getDataType()==DataType.SCALAR ? Long.MIN_VALUE :\n+ h1.dimsKnown() ? h1.getDim1()*h1.getDim2() : Long.MAX_VALUE;\n+ long ncells2 = h2.getDataType()==DataType.SCALAR ? Long.MIN_VALUE :\n+ h2.dimsKnown() ? h2.getDim1()*h2.getDim2() : Long.MAX_VALUE;\n+ if( ncells1 > ncells2 )\n+ return -1;\n+ else if( ncells1 < ncells2)\n+ return 1;\n+ return Long.compare(\n+ h1.dimsKnown(true) ? h1.getNnz() : ncells1,\n+ h2.dimsKnown(true) ? h2.getNnz() : ncells2);\n+ }\n+ }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/RowAggTpl.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/RowAggTpl.java",
"diff": "package org.apache.sysml.hops.codegen.template;\nimport java.util.ArrayList;\n+import java.util.Comparator;\nimport java.util.HashMap;\nimport java.util.HashSet;\n-import java.util.LinkedList;\n+import java.util.List;\n+import java.util.stream.Collectors;\nimport org.apache.sysml.hops.AggBinaryOp;\nimport org.apache.sysml.hops.AggUnaryOp;\n@@ -100,10 +102,10 @@ public class RowAggTpl extends BaseTpl {\nrConstructCplan(hop, memo, tmp, inHops, inHops2, compileLiterals);\nhop.resetVisitStatus();\n- //reorder inputs (ensure matrix is first input)\n- LinkedList<Hop> sinHops = new LinkedList<Hop>(inHops);\n- Hop X = inHops2.get(\"X\");\n- sinHops.remove(X); sinHops.addFirst(X);\n+ //reorder inputs (ensure matrix is first input, and other inputs ordered by size)\n+ List<Hop> sinHops = inHops.stream()\n+ .filter(h -> !(h.getDataType().isScalar() && tmp.get(h.getHopID()).isLiteral()))\n+ .sorted(new HopInputComparator(inHops2.get(\"X\"))).collect(Collectors.toList());\n//construct template node\nArrayList<CNode> inputs = new ArrayList<CNode>();\n@@ -216,4 +218,29 @@ public class RowAggTpl extends BaseTpl {\ntmp.put(hop.getHopID(), out);\n}\n+\n+ /**\n+ * Comparator to order input hops of the row aggregate template. We try\n+ * to order matrices-vectors-scalars via sorting by number of cells but\n+ * we keep the given main input always at the first position.\n+ */\n+ public static class HopInputComparator implements Comparator<Hop>\n+ {\n+ private final Hop _X;\n+\n+ public HopInputComparator(Hop X) {\n+ _X = X;\n+ }\n+\n+ @Override\n+ public int compare(Hop h1, Hop h2) {\n+ long ncells1 = h1.getDataType()==DataType.SCALAR ? Long.MIN_VALUE :\n+ (h1==_X) ? Long.MAX_VALUE :\n+ h1.dimsKnown() ? h1.getDim1()*h1.getDim2() : Long.MAX_VALUE-1;\n+ long ncells2 = h2.getDataType()==DataType.SCALAR ? Long.MIN_VALUE :\n+ (h2==_X) ? Long.MAX_VALUE :\n+ h2.dimsKnown() ? h2.getDim1()*h2.getDim2() : Long.MAX_VALUE-1;\n+ return (ncells1 > ncells2) ? -1 : (ncells1 < ncells2) ? 1 : 0;\n+ }\n+ }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java",
"diff": "@@ -254,13 +254,16 @@ public class TemplateUtils\npublic static boolean hasSingleOperation(CNodeTpl tpl) {\nCNode output = tpl.getOutput();\nreturn (output instanceof CNodeUnary || output instanceof CNodeBinary\n- || output instanceof CNodeTernary) && hasOnlyDataNodeInputs(output);\n+ || output instanceof CNodeTernary) && hasOnlyDataNodeOrLookupInputs(output);\n}\n- public static boolean hasOnlyDataNodeInputs(CNode node) {\n+ public static boolean hasOnlyDataNodeOrLookupInputs(CNode node) {\nboolean ret = true;\nfor( CNode c : node.getInput() )\n- ret &= (c instanceof CNodeData);\n+ ret &= (c instanceof CNodeData || (c instanceof CNodeUnary\n+ && (((CNodeUnary)c).getType()==UnaryType.LOOKUP0\n+ || ((CNodeUnary)c).getType()==UnaryType.LOOKUP_R\n+ || ((CNodeUnary)c).getType()==UnaryType.LOOKUP_RC)));\nreturn ret;\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOperator.java",
"new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOperator.java",
"diff": "@@ -72,8 +72,8 @@ public abstract class SpoofOperator implements Serializable\n|| inputs.get(i).isInSparseFormat() ) {\nMatrixBlock tmp = inputs.get(i);\nb[i-offset] = DataConverter.convertToDoubleVector(tmp);\n- LOG.warn(\"Converted \"+tmp.getNumRows()+\"x\"+tmp.getNumColumns() +\n- \" sideways input matrix from sparse to dense.\");\n+ LOG.warn(getClass().getName()+\": Converted \"+tmp.getNumRows()+\"x\"+tmp.getNumColumns()+\n+ \", nnz=\"+tmp.getNonZeros()+\" sideways input matrix from sparse to dense.\");\n}\n//use existing dense block\nelse {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix code generator (templates, existing ops, input ordering) |
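The CellTpl change above replaces the fixed matrices-vectors-scalars bucketing with a comparator that sorts template inputs by cell count and, on ties, by sparsity, so that a sparse input becomes the main input. Below is a minimal standalone Java sketch of that ordering idea; `SimpleHop` and its fields are hypothetical stand-ins for the real SystemML `Hop` API, not part of the patch.

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Hypothetical stand-in for SystemML's Hop with only the fields the ordering needs.
class SimpleHop {
    final String name;
    final boolean scalar;   // scalars sort last
    final long rows, cols;  // -1 if dims unknown
    final long nnz;         // -1 if nnz unknown

    SimpleHop(String name, boolean scalar, long rows, long cols, long nnz) {
        this.name = name; this.scalar = scalar;
        this.rows = rows; this.cols = cols; this.nnz = nnz;
    }
    boolean dimsKnown() { return rows >= 0 && cols >= 0; }
    boolean nnzKnown()  { return dimsKnown() && nnz >= 0; }
}

public class InputOrderSketch {
    // Larger inputs first (matrices before vectors before scalars);
    // ties broken by fewer non-zeros so a sparse input becomes the main input.
    static final Comparator<SimpleHop> BY_CELLS_THEN_SPARSITY = (h1, h2) -> {
        long c1 = h1.scalar ? Long.MIN_VALUE : h1.dimsKnown() ? h1.rows * h1.cols : Long.MAX_VALUE;
        long c2 = h2.scalar ? Long.MIN_VALUE : h2.dimsKnown() ? h2.rows * h2.cols : Long.MAX_VALUE;
        if (c1 != c2)
            return (c1 > c2) ? -1 : 1; // descending by number of cells
        return Long.compare(h1.nnzKnown() ? h1.nnz : c1,
                            h2.nnzKnown() ? h2.nnz : c2); // sparser first
    };

    public static void main(String[] args) {
        List<SimpleHop> inputs = new ArrayList<>();
        inputs.add(new SimpleHop("scalar7", true, 0, 0, 0));
        inputs.add(new SimpleHop("denseX", false, 1000, 1000, 1_000_000));
        inputs.add(new SimpleHop("sparseY", false, 1000, 1000, 5_000));
        inputs.add(new SimpleHop("vecZ", false, 1000, 1, 1_000));
        inputs.sort(BY_CELLS_THEN_SPARSITY);
        inputs.forEach(h -> System.out.println(h.name)); // sparseY, denseX, vecZ, scalar7
    }
}
```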
49,738 | 25.03.2017 22:04:51 | 25,200 | 640df186afeaf3b52741cef670da7911b9d6d9a5 | Additional codegen plan selector: fuse-no-redundancy
This patch adds a second codegen plan selection heuristic:
fuse-no-redundancy, which creates non-overlapping fused operators, i.e.,
without any redundant computation. Furthermore, we now also do proper
memoization of processed hop nodes in both existing plan selectors to
improve optimization time. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java",
"diff": "@@ -45,6 +45,9 @@ import org.apache.sysml.hops.codegen.template.TemplateBase;\nimport org.apache.sysml.hops.codegen.template.TemplateBase.CloseType;\nimport org.apache.sysml.hops.codegen.template.TemplateBase.TemplateType;\nimport org.apache.sysml.hops.codegen.template.CPlanMemoTable;\n+import org.apache.sysml.hops.codegen.template.PlanSelection;\n+import org.apache.sysml.hops.codegen.template.PlanSelectionFuseAll;\n+import org.apache.sysml.hops.codegen.template.PlanSelectionFuseNoRedundancy;\nimport org.apache.sysml.hops.codegen.template.CPlanMemoTable.MemoTableEntry;\nimport org.apache.sysml.hops.codegen.template.CPlanMemoTable.MemoTableEntrySet;\nimport org.apache.sysml.hops.codegen.template.TemplateUtils;\n@@ -82,10 +85,10 @@ public class SpoofCompiler\npublic static boolean LDEBUG = false;\npublic static final boolean RECOMPILE_CODEGEN = true;\npublic static PlanCache PLAN_CACHE_POLICY = PlanCache.CSLH;\n- public static final PlanSelection PLAN_SEL_POLICY = PlanSelection.FUSE_ALL;\n+ public static final PlanSelector PLAN_SEL_POLICY = PlanSelector.FUSE_ALL;\npublic static final boolean PRUNE_REDUNDANT_PLANS = true;\n- public enum PlanSelection {\n+ public enum PlanSelector {\nFUSE_ALL, //maximal fusion, possible w/ redundant compute\nFUSE_NO_REDUNDANCY, //fusion without redundant compute\nFUSE_COST_BASED, //cost-based decision on materialization points\n@@ -567,4 +570,22 @@ public class SpoofCompiler\n}\nreturn ret;\n}\n+\n+ /**\n+ * Factory method for alternative plan selection policies.\n+ *\n+ * @return plan selector\n+ */\n+ public static PlanSelection createPlanSelector() {\n+ switch( PLAN_SEL_POLICY ) {\n+ case FUSE_ALL:\n+ return new PlanSelectionFuseAll();\n+ case FUSE_NO_REDUNDANCY:\n+ return new PlanSelectionFuseNoRedundancy();\n+ case FUSE_COST_BASED:\n+ default:\n+ throw new RuntimeException(\"Unsupported \"\n+ + \"plan selector: \"+PLAN_SEL_POLICY);\n+ }\n+ }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/CPlanMemoTable.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/CPlanMemoTable.java",
"diff": "@@ -172,14 +172,8 @@ public class CPlanMemoTable\n}\n//core plan selection\n- switch( SpoofCompiler.PLAN_SEL_POLICY ) {\n- case FUSE_ALL:\n- new PlanSelectionFuseAll().selectPlans(this, roots);\n- break;\n- case FUSE_NO_REDUNDANCY:\n- case FUSE_COST_BASED:\n- throw new RuntimeException(\"Not implemented yet.\");\n- }\n+ PlanSelection selector = SpoofCompiler.createPlanSelector();\n+ selector.selectPlans(this, roots);\nif( SpoofCompiler.LDEBUG )\nLOG.info(\"#2: Memo after plan selection (\"+size()+\" plans)\\n\"+this);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelection.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelection.java",
"diff": "@@ -21,14 +21,18 @@ package org.apache.sysml.hops.codegen.template;\nimport java.util.ArrayList;\nimport java.util.Comparator;\n+import java.util.HashSet;\nimport org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.codegen.template.CPlanMemoTable.MemoTableEntry;\nimport org.apache.sysml.hops.codegen.template.TemplateBase.TemplateType;\nimport org.apache.sysml.hops.rewrite.HopRewriteUtils;\n+import org.apache.sysml.runtime.util.UtilFunctions;\npublic abstract class PlanSelection\n{\n+ private final HashSet<VisitMark> _visited = new HashSet<VisitMark>();\n+\n/**\n* Given a HOP DAG G, and a set of partial fusions plans P, find the set of optimal,\n* non-conflicting fusion plans P' that applied to G minimizes costs C with\n@@ -54,11 +58,19 @@ public abstract class PlanSelection\n|| (me.type == TemplateType.CellTpl);\n}\n+ public boolean isVisited(long hopID, TemplateType type) {\n+ return _visited.contains(new VisitMark(hopID, type));\n+ }\n+\n+ public void setVisited(long hopID, TemplateType type) {\n+ _visited.add(new VisitMark(hopID, type));\n+ }\n+\n/**\n* Basic plan comparator to compare memo table entries with regard to\n* a pre-defined template preference order and the number of references.\n*/\n- protected class BasicPlanComparator implements Comparator<MemoTableEntry> {\n+ protected static class BasicPlanComparator implements Comparator<MemoTableEntry> {\n@Override\npublic int compare(MemoTableEntry o1, MemoTableEntry o2) {\n//for different types, select preferred type\n@@ -70,4 +82,25 @@ public abstract class PlanSelection\n3-o1.countPlanRefs(), 3-o2.countPlanRefs());\n}\n}\n+\n+ private static class VisitMark {\n+ private final long _hopID;\n+ private final TemplateType _type;\n+\n+ public VisitMark(long hopID, TemplateType type) {\n+ _hopID = hopID;\n+ _type = type;\n+ }\n+ @Override\n+ public int hashCode() {\n+ return UtilFunctions.longlongHashCode(\n+ _hopID, (_type!=null)?_type.hashCode():0);\n+ }\n+ @Override\n+ public boolean equals(Object o) {\n+ return (o instanceof VisitMark\n+ && _hopID == ((VisitMark)o)._hopID\n+ && _type == ((VisitMark)o)._type);\n+ }\n+ }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseAll.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseAll.java",
"diff": "@@ -30,7 +30,13 @@ import org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.codegen.template.CPlanMemoTable.MemoTableEntry;\nimport org.apache.sysml.hops.codegen.template.TemplateBase.TemplateType;\n-\n+/**\n+ * This plan selection heuristic aims for maximal fusion, which\n+ * potentially leads to overlapping fused operators and thus,\n+ * redundant computation but with a minimal number of materialized\n+ * intermediate results.\n+ *\n+ */\npublic class PlanSelectionFuseAll extends PlanSelection\n{\nprivate HashMap<Long, List<MemoTableEntry>> _bestPlans =\n@@ -49,6 +55,9 @@ public class PlanSelectionFuseAll extends PlanSelection\nprivate void rSelectPlans(CPlanMemoTable memo, Hop current, TemplateType currentType)\n{\n+ if( isVisited(current.getHopID(), currentType) )\n+ return;\n+\n//step 1: prune subsumed plans of same type\nif( memo.contains(current.getHopID()) ) {\nHashSet<MemoTableEntry> rmSet = new HashSet<MemoTableEntry>();\n@@ -81,5 +90,7 @@ public class PlanSelectionFuseAll extends PlanSelection\nTemplateType pref = (best!=null && best.isPlanRef(i))? best.type : null;\nrSelectPlans(memo, current.getInput().get(i), pref);\n}\n+\n+ setVisited(current.getHopID(), currentType);\n}\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseNoRedundancy.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.hops.codegen.template;\n+\n+import java.util.ArrayList;\n+import java.util.Comparator;\n+import java.util.HashMap;\n+import java.util.Map.Entry;\n+import java.util.HashSet;\n+import java.util.List;\n+\n+import org.apache.sysml.hops.Hop;\n+import org.apache.sysml.hops.codegen.template.CPlanMemoTable.MemoTableEntry;\n+import org.apache.sysml.hops.codegen.template.TemplateBase.TemplateType;\n+\n+/**\n+ * This plan selection heuristic aims for fusion without any redundant\n+ * computation, which, however, potentially leads to more materialized\n+ * intermediates than the fuse all heuristic.\n+ * <p>\n+ * NOTE: This heuristic is essentially the same as FuseAll, except that\n+ * any plans that refer to a hop with multiple consumers are removed in\n+ * a pre-processing step.\n+ *\n+ */\n+public class PlanSelectionFuseNoRedundancy extends PlanSelection\n+{\n+ private HashMap<Long, List<MemoTableEntry>> _bestPlans =\n+ new HashMap<Long, List<MemoTableEntry>>();\n+\n+ @Override\n+ public void selectPlans(CPlanMemoTable memo, ArrayList<Hop> roots) {\n+ //pruning and collection pass\n+ for( Hop hop : roots )\n+ rSelectPlans(memo, hop, null);\n+\n+ //take all distinct best plans\n+ for( Entry<Long, List<MemoTableEntry>> e : _bestPlans.entrySet() )\n+ memo.setDistinct(e.getKey(), e.getValue());\n+ }\n+\n+ private void rSelectPlans(CPlanMemoTable memo, Hop current, TemplateType currentType)\n+ {\n+ if( isVisited(current.getHopID(), currentType) )\n+ return;\n+\n+ //step 0: remove plans that refer to a common partial plan\n+ if( memo.contains(current.getHopID()) ) {\n+ HashSet<MemoTableEntry> rmSet = new HashSet<MemoTableEntry>();\n+ List<MemoTableEntry> hopP = memo.get(current.getHopID());\n+ for( MemoTableEntry e1 : hopP )\n+ for( int i=0; i<3; i++ )\n+ if( e1.isPlanRef(i) && current.getInput().get(i).getParent().size()>1 )\n+ rmSet.add(e1); //remove references to hops w/ multiple consumers\n+ memo.remove(current, rmSet);\n+ }\n+\n+ //step 1: prune subsumed plans of same type\n+ if( memo.contains(current.getHopID()) ) {\n+ HashSet<MemoTableEntry> rmSet = new HashSet<MemoTableEntry>();\n+ List<MemoTableEntry> hopP = memo.get(current.getHopID());\n+ for( MemoTableEntry e1 : hopP )\n+ for( MemoTableEntry e2 : hopP )\n+ if( e1 != e2 && e1.subsumes(e2) )\n+ rmSet.add(e2);\n+ memo.remove(current, rmSet);\n+ }\n+\n+ //step 2: select plan for current path\n+ MemoTableEntry best = null;\n+ if( memo.contains(current.getHopID()) ) {\n+ if( currentType == null ) {\n+ best = memo.get(current.getHopID()).stream()\n+ .filter(p -> isValid(p, current))\n+ .min(new BasicPlanComparator()).orElse(null);\n+ }\n+ else {\n+\n+ best = 
memo.get(current.getHopID()).stream()\n+ .filter(p -> p.type==currentType || p.type==TemplateType.CellTpl)\n+ .min(Comparator.comparing(p -> 3-p.countPlanRefs())).orElse(null);\n+ }\n+ }\n+\n+ //step 3: recursively process children\n+ for( int i=0; i< current.getInput().size(); i++ ) {\n+ TemplateType pref = (best!=null && best.isPlanRef(i))? best.type : null;\n+ rSelectPlans(memo, current.getInput().get(i), pref);\n+ }\n+\n+ setVisited(current.getHopID(), currentType);\n+ }\n+}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1418] Additional codegen plan selector: fuse-no-redundancy
This patch adds a second codegen plan selection heuristic:
fuse-no-redundancy, which creates non-overlapping fused operators, i.e.,
without any redundant computation. Furthermore, we now also do proper
memoization of processed hop nodes in both existing plan selectors to
improve optimization time. |
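Both plan selectors now memoize visited (hop id, template type) pairs so shared subexpressions in the HOP DAG are processed once per template context, mirroring the `VisitMark` class in the PlanSelection diff above. A minimal Java sketch of that visit-marker idea, assuming simplified names (`TemplateType` and the class names here are illustrative, not the SystemML types):

```java
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;

// Minimal sketch of memoizing DAG traversal by (hop id, template type).
public class VisitMemoSketch {
    enum TemplateType { CELL, ROW_AGG, OUTER }

    private static final class VisitMark {
        final long hopId;
        final TemplateType type; // null means "no enclosing template"
        VisitMark(long hopId, TemplateType type) { this.hopId = hopId; this.type = type; }
        @Override public int hashCode() { return Objects.hash(hopId, type); }
        @Override public boolean equals(Object o) {
            return o instanceof VisitMark
                && hopId == ((VisitMark) o).hopId
                && type == ((VisitMark) o).type;
        }
    }

    private final Set<VisitMark> visited = new HashSet<>();

    boolean isVisited(long hopId, TemplateType t) { return visited.contains(new VisitMark(hopId, t)); }
    void setVisited(long hopId, TemplateType t) { visited.add(new VisitMark(hopId, t)); }

    public static void main(String[] args) {
        VisitMemoSketch memo = new VisitMemoSketch();
        memo.setVisited(7L, TemplateType.CELL);
        // The same hop under a different template context is still unvisited,
        // so it can be revisited with a different fusion preference.
        System.out.println(memo.isVisited(7L, TemplateType.CELL));    // true
        System.out.println(memo.isVisited(7L, TemplateType.ROW_AGG)); // false
    }
}
```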
49,738 | 27.03.2017 00:07:47 | 25,200 | d33537a9390a4d6e5b052786456aa82c0bb7a646 | Extended code generator (celltmpl w/ sumsq aggregation)
This patch extends the code generator w/ support for sum_squared
aggregation in cell templates, which is important if run at the default
optimization level w/ existing fused operators. Furthermore, this also
includes new test cases and a cleanup of imports in generated classes. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeCell.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeCell.java",
"diff": "@@ -22,6 +22,7 @@ package org.apache.sysml.hops.codegen.cplan;\nimport java.util.ArrayList;\nimport java.util.Arrays;\n+import org.apache.sysml.hops.Hop.AggOp;\nimport org.apache.sysml.hops.codegen.SpoofFusedOp.SpoofOutputDimsType;\nimport org.apache.sysml.runtime.codegen.SpoofCellwise.CellType;\n@@ -29,25 +30,24 @@ public class CNodeCell extends CNodeTpl\n{\nprivate static final String TEMPLATE =\n\"package codegen;\\n\"\n- + \"import java.util.Arrays;\\n\"\n- + \"import java.io.Serializable;\\n\"\n- + \"import java.util.ArrayList;\\n\"\n+ \"import org.apache.sysml.runtime.codegen.LibSpoofPrimitives;\\n\"\n+ \"import org.apache.sysml.runtime.codegen.SpoofCellwise;\\n\"\n+ + \"import org.apache.sysml.runtime.codegen.SpoofCellwise.AggOp;\\n\"\n+ \"import org.apache.sysml.runtime.codegen.SpoofCellwise.CellType;\\n\"\n+ \"import org.apache.commons.math3.util.FastMath;\\n\"\n+ \"\\n\"\n+ \"public final class %TMP% extends SpoofCellwise {\\n\"\n+ \" public %TMP%() {\\n\"\n- + \" super(CellType.%TYPE%, %SPARSE_SAFE%);\\n\"\n+ + \" super(CellType.%TYPE%, %AGG_OP%, %SPARSE_SAFE%);\\n\"\n+ \" }\\n\"\n+ \" protected double genexec( double a, double[][] b, double[] scalars, int m, int n, int rowIndex, int colIndex) { \\n\"\n+ \"%BODY_dense%\"\n+ \" return %OUT%;\\n\"\n+ \" }\\n\"\n- + \"}\";\n+ + \"}\\n\";\nprivate CellType _type = null;\n+ private AggOp _aggOp = null;\nprivate boolean _sparseSafe = false;\nprivate boolean _requiresCastdtm = false;\nprivate boolean _multipleConsumers = false;\n@@ -73,6 +73,15 @@ public class CNodeCell extends CNodeTpl\nreturn _type;\n}\n+ public void setAggOp(AggOp aggop) {\n+ _aggOp = aggop;\n+ _hash = 0;\n+ }\n+\n+ public AggOp getAggOp() {\n+ return _aggOp;\n+ }\n+\npublic void setSparseSafe(boolean flag) {\n_sparseSafe = flag;\n}\n@@ -110,6 +119,7 @@ public class CNodeCell extends CNodeTpl\n//replace meta data information\ntmp = tmp.replaceAll(\"%TYPE%\", getCellType().name());\n+ tmp = tmp.replaceAll(\"%AGG_OP%\", (_aggOp!=null) ? \"AggOp.\"+_aggOp.name() : \"null\" );\ntmp = tmp.replaceAll(\"%SPARSE_SAFE%\", String.valueOf(isSparseSafe()));\nreturn tmp;\n@@ -146,10 +156,11 @@ public class CNodeCell extends CNodeTpl\nif( _hash == 0 ) {\nint h1 = super.hashCode();\nint h2 = _type.hashCode();\n- int h3 = Boolean.valueOf(_sparseSafe).hashCode();\n- int h4 = Boolean.valueOf(_requiresCastdtm).hashCode();\n+ int h3 = (_aggOp!=null) ? _aggOp.hashCode() : 0;\n+ int h4 = Boolean.valueOf(_sparseSafe).hashCode();\n+ int h5 = Boolean.valueOf(_requiresCastdtm).hashCode();\n//note: _multipleConsumers irrelevant for plan comparison\n- _hash = Arrays.hashCode(new int[]{h1,h2,h3,h4});\n+ _hash = Arrays.hashCode(new int[]{h1,h2,h3,h4,h5});\n}\nreturn _hash;\n}\n@@ -162,6 +173,7 @@ public class CNodeCell extends CNodeTpl\nCNodeCell that = (CNodeCell)o;\nreturn super.equals(that)\n&& _type == that._type\n+ && _aggOp == that._aggOp\n&& _sparseSafe == that._sparseSafe\n&& _requiresCastdtm == that._requiresCastdtm\n&& equalInputReferences(\n@@ -173,6 +185,7 @@ public class CNodeCell extends CNodeTpl\nStringBuilder sb = new StringBuilder();\nsb.append(\"SPOOF CELLWISE [type=\");\nsb.append(_type.name());\n+ sb.append(\", aggOp=\"+((_aggOp!=null) ? _aggOp.name() : \"null\"));\nsb.append(\", sparseSafe=\"+_sparseSafe);\nsb.append(\", castdtm=\"+_requiresCastdtm);\nsb.append(\", mc=\"+_multipleConsumers);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeOuterProduct.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeOuterProduct.java",
"diff": "@@ -30,8 +30,6 @@ public class CNodeOuterProduct extends CNodeTpl\n{\nprivate static final String TEMPLATE =\n\"package codegen;\\n\"\n- + \"import java.util.Arrays;\\n\"\n- + \"import java.util.ArrayList;\\n\"\n+ \"import org.apache.sysml.runtime.codegen.LibSpoofPrimitives;\\n\"\n+ \"import org.apache.sysml.runtime.codegen.SpoofOuterProduct;\\n\"\n+ \"import org.apache.sysml.runtime.codegen.SpoofOuterProduct.OutProdType;\\n\"\n@@ -48,8 +46,7 @@ public class CNodeOuterProduct extends CNodeTpl\n+ \"%BODY_cellwise%\"\n+ \" return %OUT_cellwise%;\\n\"\n+ \" }\\n\"\n-\n- + \"}\";\n+ + \"}\\n\";\nprivate OutProdType _type = null;\nprivate boolean _transposeOutput = false;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeRowAgg.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeRowAgg.java",
"diff": "@@ -27,8 +27,6 @@ public class CNodeRowAgg extends CNodeTpl\n{\nprivate static final String TEMPLATE =\n\"package codegen;\\n\"\n- + \"import java.util.Arrays;\\n\"\n- + \"import java.util.ArrayList;\\n\"\n+ \"import org.apache.sysml.runtime.codegen.LibSpoofPrimitives;\\n\"\n+ \"import org.apache.sysml.runtime.codegen.SpoofRowAggregate;\\n\"\n+ \"\\n\"\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java",
"diff": "@@ -31,7 +31,6 @@ import org.apache.sysml.hops.AggUnaryOp;\nimport org.apache.sysml.hops.BinaryOp;\nimport org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.UnaryOp;\n-import org.apache.sysml.hops.Hop.AggOp;\nimport org.apache.sysml.hops.Hop.Direction;\nimport org.apache.sysml.hops.Hop.OpOp2;\nimport org.apache.sysml.hops.IndexingOp;\n@@ -67,7 +66,8 @@ public class TemplateCell extends TemplateBase\n@Override\npublic boolean fuse(Hop hop, Hop input) {\nreturn !isClosed() && (isValidOperation(hop)\n- || (HopRewriteUtils.isSum(hop) && ((AggUnaryOp) hop).getDirection()!= Direction.Col)\n+ || ((HopRewriteUtils.isSum(hop)||HopRewriteUtils.isSumSq(hop))\n+ && ((AggUnaryOp) hop).getDirection()!= Direction.Col)\n|| (HopRewriteUtils.isMatrixMultiply(hop) && hop.getDim1()==1 && hop.getDim2()==1)\n&& HopRewriteUtils.isTransposeOperation(hop.getInput().get(0)));\n}\n@@ -81,7 +81,8 @@ public class TemplateCell extends TemplateBase\n@Override\npublic CloseType close(Hop hop) {\n//need to close cell tpl after aggregation, see fuse for exact properties\n- if( (HopRewriteUtils.isSum(hop) && ((AggUnaryOp) hop).getDirection()!= Direction.Col)\n+ if( ((HopRewriteUtils.isSum(hop)||HopRewriteUtils.isSumSq(hop))\n+ && ((AggUnaryOp) hop).getDirection()!= Direction.Col)\n|| (HopRewriteUtils.isMatrixMultiply(hop) && hop.getDim1()==1 && hop.getDim2()==1) )\nreturn CloseType.CLOSED_VALID;\nelse if( hop instanceof AggUnaryOp || hop instanceof AggBinaryOp )\n@@ -114,6 +115,7 @@ public class TemplateCell extends TemplateBase\nCNode output = tmp.get(hop.getHopID());\nCNodeCell tpl = new CNodeCell(inputs, output);\ntpl.setCellType(TemplateUtils.getCellType(hop));\n+ tpl.setAggOp(TemplateUtils.getAggOp(hop));\ntpl.setSparseSafe((HopRewriteUtils.isBinary(hop, OpOp2.MULT) && hop.getInput().contains(sinHops.get(0)))\n|| (HopRewriteUtils.isBinary(hop, OpOp2.DIV) && hop.getInput().get(0) == sinHops.get(0)));\ntpl.setRequiresCastDtm(hop instanceof AggBinaryOp);\n@@ -211,10 +213,10 @@ public class TemplateCell extends TemplateBase\n{\nout = tmp.get(hop.getInput().get(0).getHopID());\n}\n- else if( hop instanceof AggUnaryOp && ((AggUnaryOp)hop).getOp() == AggOp.SUM\n- && (((AggUnaryOp) hop).getDirection() == Direction.RowCol\n- || ((AggUnaryOp) hop).getDirection() == Direction.Row) )\n+ else if( hop instanceof AggUnaryOp )\n{\n+ //aggregation handled in template implementation (note: we do not compile\n+ //^2 of SUM_SQ into the operator to simplify the detection of single operators)\nout = tmp.get(hop.getInput().get(0).getHopID());\n}\nelse if( hop instanceof AggBinaryOp ) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java",
"diff": "@@ -31,6 +31,7 @@ import org.apache.sysml.hops.BinaryOp;\nimport org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.LiteralOp;\nimport org.apache.sysml.hops.TernaryOp;\n+import org.apache.sysml.hops.Hop.AggOp;\nimport org.apache.sysml.hops.Hop.Direction;\nimport org.apache.sysml.hops.UnaryOp;\nimport org.apache.sysml.hops.codegen.cplan.CNode;\n@@ -191,10 +192,15 @@ public class TemplateUtils\npublic static CellType getCellType(Hop hop) {\nreturn (hop instanceof AggBinaryOp) ? CellType.FULL_AGG :\n- HopRewriteUtils.isSum(hop) ? ((((AggUnaryOp) hop).getDirection() == Direction.RowCol) ?\n+ (hop instanceof AggUnaryOp) ? ((((AggUnaryOp) hop).getDirection() == Direction.RowCol) ?\nCellType.FULL_AGG : CellType.ROW_AGG) : CellType.NO_AGG;\n}\n+ public static AggOp getAggOp(Hop hop) {\n+ return (hop instanceof AggUnaryOp) ? ((AggUnaryOp)hop).getOp() :\n+ (hop instanceof AggBinaryOp) ? AggOp.SUM : null;\n+ }\n+\npublic static OutProdType getOuterProductType(Hop X, Hop U, Hop V, Hop out) {\nif( out.getDataType() == DataType.SCALAR )\nreturn OutProdType.AGG_OUTER_PRODUCT;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java",
"new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java",
"diff": "@@ -799,6 +799,10 @@ public class HopRewriteUtils\nreturn (hop instanceof AggUnaryOp && ((AggUnaryOp)hop).getOp()==AggOp.SUM);\n}\n+ public static boolean isSumSq(Hop hop) {\n+ return (hop instanceof AggUnaryOp && ((AggUnaryOp)hop).getOp()==AggOp.SUM_SQ);\n+ }\n+\npublic static boolean isNonZeroIndicator(Hop pred, Hop hop )\n{\nif( pred instanceof BinaryOp && ((BinaryOp)pred).getOp()==OpOp2.NOTEQUAL\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofCellwise.java",
"new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofCellwise.java",
"diff": "@@ -29,7 +29,9 @@ import java.util.concurrent.Executors;\nimport java.util.concurrent.Future;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.functionobjects.KahanFunction;\nimport org.apache.sysml.runtime.functionobjects.KahanPlus;\n+import org.apache.sysml.runtime.functionobjects.KahanPlusSq;\nimport org.apache.sysml.runtime.instructions.cp.DoubleObject;\nimport org.apache.sysml.runtime.instructions.cp.KahanObject;\nimport org.apache.sysml.runtime.instructions.cp.ScalarObject;\n@@ -48,11 +50,19 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nROW_AGG,\n}\n+ //redefinition of Hop.AggOp for cleaner imports in generate class\n+ public enum AggOp {\n+ SUM,\n+ SUM_SQ,\n+ }\n+\nprivate final CellType _type;\n+ private final AggOp _aggOp;\nprivate final boolean _sparseSafe;\n- public SpoofCellwise(CellType type, boolean sparseSafe) {\n+ public SpoofCellwise(CellType type, AggOp aggOp, boolean sparseSafe) {\n_type = type;\n+ _aggOp = aggOp;\n_sparseSafe = sparseSafe;\n}\n@@ -64,6 +74,16 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nreturn _sparseSafe;\n}\n+ private KahanFunction getAggFunction() {\n+ switch( _aggOp ) {\n+ case SUM: return KahanPlus.getKahanPlusFnObject();\n+ case SUM_SQ: return KahanPlusSq.getKahanPlusSqFnObject();\n+ default:\n+ throw new RuntimeException(\"Unsupported \"\n+ + \"aggregation type: \"+_aggOp.name());\n+ }\n+ }\n+\n@Override\npublic ScalarObject execute(ArrayList<MatrixBlock> inputs, ArrayList<ScalarObject> scalarObjects, int k)\nthrows DMLRuntimeException\n@@ -202,11 +222,9 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nprivate double executeDenseAndAgg(double[] a, double[][] b, double[] scalars, int m, int n, boolean sparseSafe, int rl, int ru)\n{\nKahanObject kbuff = new KahanObject(0, 0);\n- KahanPlus kplus = KahanPlus.getKahanPlusFnObject();\n+ KahanFunction kplus = getAggFunction();\nif( a == null && !sparseSafe ) { //empty\n- //note: we can't determine sparse-safeness by executing the operator once\n- //as the output might change with different row indices\nfor( int i=rl; i<ru; i++ )\nfor( int j=0; j<n; j++ )\nkplus.execute2(kbuff, genexec( 0, b, scalars, m, n, i, j ));\n@@ -248,11 +266,9 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nelse if( _type == CellType.ROW_AGG )\n{\nKahanObject kbuff = new KahanObject(0, 0);\n- KahanPlus kplus = KahanPlus.getKahanPlusFnObject();\n+ KahanFunction kplus = getAggFunction();\nif( a == null && !sparseSafe ) { //empty\n- //note: we can't determine sparse-safeness by executing the operator once\n- //as the output might change with different row indices\nfor( int i=rl; i<ru; i++ ) {\nkbuff.set(0, 0);\nfor( int j=0; j<n; j++ )\n@@ -279,7 +295,7 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nprivate double executeSparseAndAgg(SparseBlock sblock, double[][] b, double[] scalars, int m, int n, boolean sparseSafe, int rl, int ru)\n{\nKahanObject kbuff = new KahanObject(0, 0);\n- KahanPlus kplus = KahanPlus.getKahanPlusFnObject();\n+ KahanFunction kplus = getAggFunction();\nif( sparseSafe ) {\nif( sblock != null ) {\n@@ -337,7 +353,7 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nelse if( _type == CellType.ROW_AGG )\n{\nKahanObject kbuff = new KahanObject(0, 0);\n- KahanPlus kplus = KahanPlus.getKahanPlusFnObject();\n+ KahanFunction kplus = 
getAggFunction();\nif( sparseSafe ) {\nif( sblock != null ) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java",
"diff": "@@ -44,18 +44,20 @@ public class CellwiseTmplTest extends AutomatedTestBase\nprivate static final String TEST_NAME6 = TEST_NAME+6;\nprivate static final String TEST_NAME7 = TEST_NAME+7;\nprivate static final String TEST_NAME8 = TEST_NAME+8;\n+ private static final String TEST_NAME9 = TEST_NAME+9; //sum((X + 7 * Y)^2)\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + CellwiseTmplTest.class.getSimpleName() + \"/\";\n- private final static String TEST_CONF = \"SystemML-config-codegen.xml\";\n- private final static File TEST_CONF_FILE = new File(SCRIPT_DIR + TEST_DIR, TEST_CONF);\n+ private final static String TEST_CONF6 = \"SystemML-config-codegen6.xml\";\n+ private final static String TEST_CONF7 = \"SystemML-config-codegen.xml\";\n+ private static String TEST_CONF = TEST_CONF7;\nprivate static final double eps = Math.pow(10, -10);\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- for( int i=1; i<=8; i++ ) {\n+ for( int i=1; i<=9; i++ ) {\naddTestConfiguration( TEST_NAME+i, new TestConfiguration(\nTEST_CLASS_DIR, TEST_NAME+i, new String[] {String.valueOf(i)}) );\n}\n@@ -102,6 +104,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME8, true, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenCellwiseRewrite9() {\n+ testCodegenIntegration( TEST_NAME9, true, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenCellwise1() {\ntestCodegenIntegration( TEST_NAME1, false, ExecType.CP );\n@@ -143,6 +150,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME8, false, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenCellwise9() {\n+ testCodegenIntegration( TEST_NAME9, false, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenCellwiseRewrite1_sp() {\ntestCodegenIntegration( TEST_NAME1, true, ExecType.SPARK );\n@@ -158,10 +170,16 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME8, true, ExecType.SPARK );\n}\n+ @Test\n+ public void testCodegenCellwiseRewrite9_sp() {\n+ testCodegenIntegration( TEST_NAME9, true, ExecType.SPARK );\n+ }\n+\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\nboolean oldRewrites = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n+ String oldTestConf = TEST_CONF;\nswitch( instType ){\ncase MR: rtplatform = RUNTIME_PLATFORM.HADOOP; break;\n@@ -172,6 +190,9 @@ public class CellwiseTmplTest extends AutomatedTestBase\ndefault: rtplatform = RUNTIME_PLATFORM.HYBRID; break;\n}\n+ if( testname.equals(TEST_NAME9) )\n+ TEST_CONF = TEST_CONF6;\n+\ntry\n{\nTestConfiguration config = getTestConfiguration(testname);\n@@ -189,7 +210,7 @@ public class CellwiseTmplTest extends AutomatedTestBase\nrunTest(true, false, null, -1);\nrunRScript(true);\n- if(testname.equals(TEST_NAME6) || testname.equals(TEST_NAME7) ) {\n+ if(testname.equals(TEST_NAME6) || testname.equals(TEST_NAME7) || testname.equals(TEST_NAME9) ) {\n//compare scalars\nHashMap<CellIndex, Double> dmlfile = readDMLScalarFromHDFS(\"S\");\nHashMap<CellIndex, Double> rfile = readRScalarFromFS(\"S\");\n@@ -212,6 +233,7 @@ public class CellwiseTmplTest extends AutomatedTestBase\nOptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = oldRewrites;\nOptimizerUtils.ALLOW_AUTO_VECTORIZATION = true;\nOptimizerUtils.ALLOW_OPERATOR_FUSION = true;\n+ TEST_CONF = oldTestConf;\n}\n}\n@@ -222,6 +244,7 @@ public class CellwiseTmplTest extends 
AutomatedTestBase\n@Override\nprotected File getConfigTemplateFile() {\n// Instrumentation in this test's output log to show custom configuration file used for template.\n+ File TEST_CONF_FILE = new File(SCRIPT_DIR + TEST_DIR, TEST_CONF);\nSystem.out.println(\"This test case overrides default configuration with \" + TEST_CONF_FILE.getPath());\nreturn TEST_CONF_FILE;\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/SystemML-config-codegen6.xml",
"diff": "+<!--\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+-->\n+\n+<root>\n+ <!-- local fs tmp working directory-->\n+ <localtmpdir>/tmp/systemml</localtmpdir>\n+\n+ <!-- hdfs tmp working directory-->\n+ <scratch>scratch_space</scratch>\n+\n+ <!-- compiler optimization level, valid values: 0 | 1 | 2 | 3 | 4, default: 2 -->\n+ <optlevel>6</optlevel>\n+\n+ <!-- default number of reduce tasks per MR job, default: 2 x number of nodes -->\n+ <numreducers>10</numreducers>\n+\n+ <!-- override jvm reuse flag for specific MR jobs, valid values: true | false -->\n+ <jvmreuse>false</jvmreuse>\n+\n+ <!-- default block dim for binary block files -->\n+ <defaultblocksize>1000</defaultblocksize>\n+\n+ <!-- run systemml control program as yarn appmaster, in case of MR1 always falls back to client, please disable for debug mode -->\n+ <dml.yarn.appmaster>false</dml.yarn.appmaster>\n+\n+ <!-- maximum jvm heap size of the dml yarn appmaster in MB, the requested memory is 1.5x this parameter -->\n+ <dml.yarn.appmaster.mem>2048</dml.yarn.appmaster.mem>\n+\n+ <!-- maximum jvm heap size of the map/reduce tasks in MB, the requested memory is 1.5x this parameter, negative values ignored -->\n+ <dml.yarn.mapreduce.mem>2048</dml.yarn.mapreduce.mem>\n+\n+ <!-- yarn application submission queue, relevant for default capacity scheduler -->\n+ <dml.yarn.app.queue>default</dml.yarn.app.queue>\n+\n+ <!-- enables multi-threaded matrix multiplications in singlenode control program -->\n+ <cp.parallel.matrixmult>true</cp.parallel.matrixmult>\n+\n+ <!-- enables multi-threaded read/write of text formats in singlenode control program -->\n+ <cp.parallel.textio>true</cp.parallel.textio>\n+\n+ <!-- enables automatic code generation -->\n+ <codegen.enabled>true</codegen.enabled>\n+ <codegen.plancache>true</codegen.plancache>\n+ <codegen.literals>1</codegen.literals>\n+</root>\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/cellwisetmpl9.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+X = matrix(seq(7, 1006), 500, 2);\n+Y = matrix(seq(6, 1005), 500, 2);\n+\n+Z = X + 7 * Y;\n+R = sum(Z^2)\n+\n+write(R, paste(args[2],\"S\",sep=\"\"))\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/cellwisetmpl9.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(seq(7, 1006), 500, 2);\n+Y = matrix(seq(6, 1005), 500, 2);\n+\n+Z = X + 7 * Y;\n+R = sum(Z^2)\n+\n+write(R, $1)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1438] Extended code generator (celltmpl w/ sumsq aggregation)
This patch extends the code generator w/ support for sum_squared
aggregation in cell templates, which is important if run at the default
optimization level w/ existing fused operators. Furthermore, this also
includes new test cases and a cleanup of imports in generated classes. |
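At runtime, the commit above dispatches SUM and SUM_SQ aggregations to Kahan-compensated accumulators (`KahanPlus`, `KahanPlusSq`). The following self-contained Java sketch shows why compensated summation matters for sum-of-squares; the helper below is an illustrative assumption, not the SystemML `KahanObject` API.

```java
// Minimal sketch of compensated (Kahan) accumulation as used for SUM/SUM_SQ;
// SystemML's KahanObject/KahanPlusSq follow the same scheme with a different API.
public class KahanSumSqSketch {
    // returns {sum, correction} after adding one value
    static double[] kahanAdd(double sum, double corr, double in) {
        double corrected = in + corr;                 // fold back prior rounding error
        double newSum = sum + corrected;
        double newCorr = corrected - (newSum - sum);  // error lost in this addition
        return new double[]{newSum, newCorr};
    }

    public static void main(String[] args) {
        double sum = 0, corr = 0, plain = 0;
        double[] vals = new double[1_000_001];
        vals[0] = 1e8;                                // square is 1e16
        java.util.Arrays.fill(vals, 1, vals.length, 1.0);
        for (double v : vals) {
            double sq = v * v;                        // SUM_SQ squares before accumulating
            plain += sq;
            double[] s = kahanAdd(sum, corr, sq);
            sum = s[0]; corr = s[1];
        }
        System.out.println("plain: " + plain);        // 1.0E16 - the million 1s are lost
        System.out.println("kahan: " + (sum + corr)); // ~1.0000000001E16
    }
}
```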
49,717 | 27.03.2017 14:39:31 | 25,200 | af93ca8a40befaaba79b0f96b4dbf8b8db85be13 | [MINOR] Cleanup of some comments | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"diff": "@@ -26,15 +26,16 @@ import jcuda.jcublas.cublasFillMode;\nimport jcuda.jcublas.cublasHandle;\nimport jcuda.jcublas.cublasOperation;\nimport jcuda.jcudnn.cudnnActivationDescriptor;\n+import jcuda.jcudnn.cudnnBatchNormMode;\nimport jcuda.jcudnn.cudnnConvolutionDescriptor;\nimport jcuda.jcudnn.cudnnConvolutionFwdPreference;\nimport jcuda.jcudnn.cudnnFilterDescriptor;\nimport jcuda.jcudnn.cudnnHandle;\nimport jcuda.jcudnn.cudnnPoolingDescriptor;\n+import jcuda.jcudnn.cudnnStatus;\nimport jcuda.jcudnn.cudnnTensorDescriptor;\nimport jcuda.jcusparse.JCusparse;\nimport jcuda.jcusparse.cusparseHandle;\n-\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.api.DMLScript;\n@@ -88,6 +89,9 @@ import org.apache.sysml.utils.Statistics;\nimport static jcuda.jcublas.cublasOperation.CUBLAS_OP_N;\nimport static jcuda.jcublas.cublasOperation.CUBLAS_OP_T;\nimport static jcuda.jcudnn.JCudnn.cudnnActivationForward;\n+import static jcuda.jcudnn.JCudnn.cudnnBatchNormalizationBackward;\n+import static jcuda.jcudnn.JCudnn.cudnnBatchNormalizationForwardInference;\n+import static jcuda.jcudnn.JCudnn.cudnnBatchNormalizationForwardTraining;\nimport static jcuda.jcudnn.JCudnn.cudnnConvolutionBackwardData;\nimport static jcuda.jcudnn.JCudnn.cudnnConvolutionBackwardFilter;\nimport static jcuda.jcudnn.JCudnn.cudnnConvolutionForward;\n@@ -126,11 +130,6 @@ import static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice;\nimport static org.apache.sysml.runtime.instructions.gpu.context.JCudaObject.allocate;\nimport static org.apache.sysml.runtime.instructions.gpu.context.JCudaObject.cudaFreeHelper;\n-import jcuda.jcudnn.cudnnBatchNormMode;\n-import jcuda.jcudnn.cudnnStatus;\n-import static jcuda.jcudnn.JCudnn.cudnnBatchNormalizationForwardInference;\n-import static jcuda.jcudnn.JCudnn.cudnnBatchNormalizationForwardTraining;\n-import static jcuda.jcudnn.JCudnn.cudnnBatchNormalizationBackward;\n//FIXME move could to respective instructions, this is not a block library\npublic class LibMatrixCUDA {\n@@ -2518,22 +2517,11 @@ public class LibMatrixCUDA {\nPointer A = getDensePointer(out, instName);\nint rlen = (int) out.getNumRows();\nint clen = (int) out.getNumColumns();\n-// if(constant == 0) {\n-// out.getMatrixCharacteristics().setNonZeros(0);\n-// }\n-// else {\n-// out.getMatrixCharacteristics().setNonZeros(rlen*clen);\n-// }\n- // dense_matrix_set(double* A, double scalar, int rlen, int clen)\n-\nlong t0=0;\nif (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\nint size = rlen * clen;\nkernels.launchKernel(\"fill\", ExecutionConfig.getConfigForSimpleVectorOperations(size),\nA, constant, size);\n- // kernels.launchKernel(\"dense_matrix_set\",\n- // ExecutionConfig.getConfigForSimpleMatrixOperations(rlen, clen),\n- // A, constant, rlen, clen);\nif (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_FILL_KERNEL, System.nanoTime() - t0);\n}\n@@ -2549,9 +2537,6 @@ public class LibMatrixCUDA {\nprivate static void deviceCopy(String instName, Pointer src, Pointer dest, int rlen, int clen) throws DMLRuntimeException {\nlong t0=0;\nif (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\n- //kernels.launchKernel(\"dense_matrix_copy\",\n- // ExecutionConfig.getConfigForSimpleMatrixOperations(rlen, clen),\n- // src, dest, rlen, clen);\nint size = rlen * clen * Sizeof.DOUBLE;\ncudaMemcpy(dest, src, size, cudaMemcpyDeviceToDevice);\nif 
(GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_DEVICE_TO_DEVICE, System.nanoTime() - t0);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Cleanup of some comments |
49,768 | 27.03.2017 22:27:44 | 25,200 | 22c5049f8828a5b0eb4a78c615c4dec9bd9a8150 | Automate Release Artifact verification | [
{
"change_type": "MODIFY",
"old_path": "dev/release/artifact-verify.sh",
"new_path": "dev/release/artifact-verify.sh",
"diff": "@@ -95,10 +95,12 @@ done\nORIG_DIR=$(pwd)\nEXEC_DIR=\"`dirname \\\"$0\\\"`\"\n+if [[ ${EXEC_DIR:0:1} != \"/\" ]]; then\n+ EXEC_DIR=$ORIG_DIR/$EXEC_DIR\n+fi\ncd $EXEC_DIR/src/test/java\nif [[ \"$ARTIFACT_VERIFY\" == \"true\" && -z \"$DIST_DIR\" ]]; then\n-# echo \"WARNING: Since --distDir has not passed, default distribution directory '../../../target/release/incubator-systemml/target' has been used.\"\necho \"WARNING: Since --distDir has not passed, default distribution directory '$EXEC_DIR/target/release/incubator-systemml/target' has been used.\"\nDIST_DIR=\"$EXEC_DIR/target/release/incubator-systemml/target\"\nelif [[ ${DIST_DIR:0:1} != \"/\" ]]; then\n"
},
{
"change_type": "MODIFY",
"old_path": "dev/release/src/test/java/org/apache/sysml/validation/ValidateLicAndNotice.java",
"new_path": "dev/release/src/test/java/org/apache/sysml/validation/ValidateLicAndNotice.java",
"diff": "@@ -50,16 +50,22 @@ public class ValidateLicAndNotice\n//Return codes\npublic static final int SUCCESS = 0;\npublic static final int NO_ZIP_TGZ = 1; // 0000 0000 0000 0001\n- public static final int JAR_NOT_IN_LIC = 2; // 0000 0000 0000 0010\n- public static final int JAR_NOT_IN_ZIP = 4; // 0000 0000 0000 0100\n+ public static final int FILE_NOT_IN_LIC = 2; // 0000 0000 0000 0010\n+ public static final int FILE_NOT_IN_ZIP = 4; // 0000 0000 0000 0100\npublic static final int FAILURE = 0xFFFF;\n//String constants\n- public static final String JAR = \"jar\";\npublic static final String ZIP = \"zip\";\npublic static final String TGZ = \"tgz\";\npublic static final String LICENSE = \"LICENSE\";\n+ public static final String JAR = \"jar\";\n+ public static final String DLL = \"dll\";\n+ public static final String EXP = \"exp\";\n+ public static final String LIB = \"lib\";\n+ public static final String PDB = \"pdb\";\n+ public static final String EXE = \"exe\";\n+ public static String[] fileTypes = {JAR, DLL, EXP, LIB, PDB, EXE};\nstatic final int BUFFER = 2048;\n// Zip Distribution directory.\n@@ -87,7 +93,7 @@ public class ValidateLicAndNotice\n*/\npublic int validate() throws Exception {\n- int retCode = SUCCESS, retCodeAll = SUCCESS;\n+ int retCode = SUCCESS, retCodeForAllFileTypes = SUCCESS, retCodeAll = SUCCESS;\nFile distroRoot = new File( getDistroDir());\nFile libDirectory = distroRoot;\n@@ -108,45 +114,50 @@ public class ValidateLicAndNotice\nfor (String zipFile: zips)\n{\n- retCode = SUCCESS;\n-\n+ retCodeForAllFileTypes = SUCCESS;\nSystem.out.println(\"======================================================================================\");\nSystem.out.println(\"Validating zip file : \" + zipFile + \" ...\");\n- List<String> jarsAll = null;\n+ for (String fileType: fileTypes) {\n+ retCode = SUCCESS;\n+\n+ List<String> filesAll = null;\nif (zipFile.endsWith(\".\" + ZIP)) {\nValidateLicAndNotice.extractFileFromZip(libDirectory + \"/\" + zipFile, LICENSE, outTempDir.getAbsolutePath());\n- jarsAll = getFilesFromZip(libDirectory + \"/\" + zipFile, JAR);\n+ filesAll = getFilesFromZip(libDirectory + \"/\" + zipFile, fileType);\n} else if (zipFile.endsWith(\".\" + TGZ)) {\nValidateLicAndNotice.extractFileFromTGZ(libDirectory + \"/\" + zipFile, LICENSE, outTempDir.getAbsolutePath());\n- jarsAll = getFilesFromTGZ(libDirectory + \"/\" + zipFile, JAR);\n+ filesAll = getFilesFromTGZ(libDirectory + \"/\" + zipFile, fileType);\n}\nFile licenseFile = new File(outTempDir, LICENSE);\n- List<String> jars = new ArrayList<String>();\n- for (String jar: jarsAll) {\n+ List<String> files = new ArrayList<String>();\n+ for (String file : filesAll) {\nString strSysMLName = \"SystemML\";\nint sysmlLen = strSysMLName.length();\n- String strBegPart = jar.substring(0, sysmlLen);\n+ String strBegPart = file.substring(0, sysmlLen);\nif (strBegPart.compareToIgnoreCase(strSysMLName) != 0)\n- jars.add(jar);\n+ files.add(file);\n}\n- List<String> bad2 = getLICENSEFilesNotInList(licenseFile, jars, JAR);\n+ List<String> bad2 = getLICENSEFilesNotInList(licenseFile, files, fileType);\nif (bad2.size() > 0) {\n- System.err.println(\"Jars in LICENSE but not in Distribution: \" + bad2);\n- retCode += JAR_NOT_IN_ZIP;\n+ System.err.println(\"Files in LICENSE but not in Distribution: \" + bad2);\n+ retCode += FILE_NOT_IN_ZIP;\n}\n- List<String> bad1 = getFilesNotInLICENSE(licenseFile, jars, JAR);\n+ List<String> bad1 = getFilesNotInLICENSE(licenseFile, files, fileType);\nif (bad1.size() > 0) {\n- System.err.println(\"Jars 
in distribution but not in LICENSE: \" + bad1);\n- retCode += JAR_NOT_IN_LIC;\n+ System.err.println(\"Files in distribution but not in LICENSE: \" + bad1);\n+ retCode += FILE_NOT_IN_LIC;\n}\n- if (bad1.size() > 0 || bad2.size() > 0)\n+ if (bad1.size() > 0 || bad2.size() > 0) {\nSystem.out.println(\"ERROR: License validation failed for zip file \" + zipFile + \" with error code \" + retCode + \", please validate file manually.\");\n- else\n+ retCodeForAllFileTypes = FAILURE;\n+ }\n+ }\n+ if(retCodeForAllFileTypes == SUCCESS)\nSystem.out.println(\"Validation of zip file : \" + zipFile + \" completed successfully.\");\nretCodeAll = retCode != SUCCESS?FAILURE:retCodeAll;\n@@ -261,17 +272,9 @@ public class ValidateLicAndNotice\n*/\nprivate List<String> getZipsInDistro(File directory) {\nList<String> zips = new ArrayList<String>();\n- for (String fileName : directory.list()){\n- if ((fileName.endsWith(\".\" + ZIP)) || (fileName.endsWith(\".\" + TGZ))) {\n+ for (String fileName : directory.list())\n+ if ((fileName.endsWith(\".\" + ZIP)) || (fileName.endsWith(\".\" + TGZ)))\nzips.add(fileName);\n- }\n-// else {\n-// File file = new File(directory, fileName);\n-// if (file.isDirectory()) {\n-// zips.addAll(getZipsInDistro(f));\n-// }\n-// }\n- }\nreturn zips;\n}\n@@ -400,7 +403,7 @@ public class ValidateLicAndNotice\n* @return Returns list of files having specified extention from zip file .\n*/\npublic static List<String> getFilesFromZip (String zipFileName, String fileExt) {\n- List<String> jars = new ArrayList<String>();\n+ List<String> files = new ArrayList<String>();\ntry {\nZipEntry entry;\nZipFile zipfile = new ZipFile(zipFileName);\n@@ -413,14 +416,14 @@ public class ValidateLicAndNotice\nif (iPos == 0)\n--iPos;\nString strFileName = entry.getName().substring(iPos+1);\n- jars.add(strFileName);\n+ files.add(strFileName);\n// System.out.println(\"File found : \" + strFileName);\n}\n}\n} catch(Exception e) {\ne.printStackTrace();\n}\n- return (jars);\n+ return (files);\n}\n/**\n@@ -445,7 +448,7 @@ public class ValidateLicAndNotice\nreturn null;\n}\n- List<String> jars = new ArrayList<String>();\n+ List<String> files = new ArrayList<String>();\ntry {\nTarArchiveEntry tarEntry = null;\nwhile((tarEntry = tarIn.getNextTarEntry()) != null) {\n@@ -455,14 +458,14 @@ public class ValidateLicAndNotice\nif (iPos == 0)\n--iPos;\nString strFileName = tarEntry.getName().substring(iPos+1);\n- jars.add(strFileName);\n+ files.add(strFileName);\n// System.out.println(\"File found : \" + strFileName);\n}\n}\n} catch(Exception e) {\ne.printStackTrace();\n}\n- return (jars);\n+ return (files);\n}\n/**\n@@ -484,7 +487,7 @@ public class ValidateLicAndNotice\nSystem.out.println(\"Return code = \" + retCode);\n}\ncatch (Exception e) {\n- System.out.println(\"Error while validating license in jars file.\" + e);\n+ System.out.println(\"Error while validating license in zip/tgz file.\" + e);\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1440] Automate Release Artifact verification |
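The validator above generalizes its jar check to several binary artifact types (jar, dll, exp, lib, pdb, exe) pulled out of each release zip/tgz. A rough Java sketch of the archive-scanning step using only java.util.zip; the class and method names here are illustrative, not the patch's code, and error handling is kept minimal.

```java
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.List;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;

// Collect base names of binary artifacts per extension so they can be
// checked against the entries listed in the release LICENSE file.
public class ArchiveScanSketch {
    static final String[] FILE_TYPES = {"jar", "dll", "exp", "lib", "pdb", "exe"};

    static List<String> filesWithExt(String zipPath, String ext) throws Exception {
        List<String> names = new ArrayList<>();
        try (ZipFile zip = new ZipFile(zipPath)) {
            Enumeration<? extends ZipEntry> en = zip.entries();
            while (en.hasMoreElements()) {
                ZipEntry e = en.nextElement();
                if (!e.isDirectory() && e.getName().endsWith("." + ext)) {
                    int slash = e.getName().lastIndexOf('/');
                    names.add(e.getName().substring(slash + 1)); // base name only
                }
            }
        }
        return names;
    }

    public static void main(String[] args) throws Exception {
        for (String ext : FILE_TYPES)
            System.out.println(ext + ": " + filesWithExt(args[0], ext));
    }
}
```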
49,736 | 29.03.2017 20:54:01 | 25,200 | 10f5dd920ee4d966c29ce601ce31465492e971f4 | [MINOR] Disable Spark Convolution operation until rectangular blocks are
handled | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java",
"diff": "@@ -71,7 +71,8 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\n}\nprivate boolean isEligibleForSpark() {\n- return (op == ConvOp.DIRECT_CONV2D || op == ConvOp.MAX_POOLING) ? true : false;\n+ // return (op == ConvOp.DIRECT_CONV2D || op == ConvOp.MAX_POOLING) ? true : false;\n+ return false;\n}\n@Override\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Disable Spark Convolution operation until rectangular blocks are
handled |
49,736 | 30.03.2017 17:18:34 | 25,200 | c7931403526bda5dba6090f9b40394fbf76924e3 | [MINOR] Disable Spark Convolution Operation and cleanup | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java",
"diff": "@@ -96,7 +96,7 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\ncase BIAS_ADD:\ncase BIAS_MULTIPLY:\n{\n- if(et == ExecType.CP || et == ExecType.GPU || et == ExecType.SPARK) {\n+ if(et == ExecType.CP || et == ExecType.GPU) {\nsetLops(constructConvolutionLops(et, inputs));\nbreak;\n}\n@@ -142,38 +142,6 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nreturn input instanceof ConvolutionOp && ((ConvolutionOp) input).getOp() == ConvOp.DIRECT_CONV2D;\n}\n- @SuppressWarnings(\"unused\")\n- private Lop addReblockIfNecessary(ExecType et, OperationTypes lopOp, Lop in) throws LopsException {\n- if(et == ExecType.SPARK) {\n- switch(lopOp) {\n- case MAX_POOLING:\n- case RELU_MAX_POOLING:\n- case DIRECT_CONV2D:\n- case DIRECT_CONV2D_BIAS_ADD:\n- if(in.getOutputParameters().getColsInBlock() < in.getOutputParameters().getNumCols() ||\n- in.getOutputParameters().getRowsInBlock() != 1) {\n- // Need to add a reblock\n- return new ReBlock(in, 1L, in.getOutputParameters().getNumCols(), DataType.MATRIX, ValueType.DOUBLE, true, et);\n- }\n- else\n- return in;\n- default:\n- throw new LopsException(\"Spark operator is not implemented for \" + lopOp.name());\n- }\n- }\n- return in;\n- }\n-\n- @SuppressWarnings(\"unused\")\n- private void setReblockedOutputDimension(ExecType et, Lop lop) throws HopsException {\n- if(et == ExecType.SPARK) {\n- lop.getOutputParameters().setDimensions(getDim1(), getDim2(), 1L, getDim2(), getNnz(), getUpdateType());\n- }\n- else {\n- setOutputDimensions(lop);\n- }\n- }\n-\npublic Lop constructConvolutionLops(ExecType et, ArrayList<Hop> inputs) throws HopsException, LopsException {\nif(inputs.size() != getNumExpectedInputs())\nthrow new HopsException(\"Incorrect number of inputs for \" + op.name());\n@@ -330,8 +298,6 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\n{\nif ( OptimizerUtils.isMemoryBasedOptLevel() ) {\n_etype = findGPUExecTypeByMemEstimate(findExecTypeByMemEstimate());\n- // TODO: Fix this after adding remaining spark instructions\n- _etype = !isEligibleForSpark() && _etype == REMOTE ? ExecType.CP : _etype;\n}\nelse {\n_etype = REMOTE;\n@@ -341,6 +307,9 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\ncheckAndSetInvalidCPDimsAndSize();\n}\n+ // TODO: Fix this after adding remaining spark instructions\n+ _etype = !isEligibleForSpark() && _etype == REMOTE ? ExecType.CP : _etype;\n+\n//mark for recompile (forever)\nif( ConfigurationManager.isDynamicRecompilation() && !dimsKnown(true) && _etype==REMOTE )\nsetRequiresRecompile();\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Disable Spark Convolution Operation and cleanup |
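The two convolution commits above implement one guard in two steps: `isEligibleForSpark()` is hard-wired to `false`, and the optimizer then demotes any remote plan to single-node CP on every path, not only under memory-based optimization. A compact sketch of that fallback pattern follows, with simplified stand-in names; in the actual `ConvolutionOp` the cluster backend is referenced via a `REMOTE` alias rather than a plain `SPARK` constant.

```java
// Simplified stand-ins; not SystemML's actual Hop/ExecType definitions.
enum ExecType { CP, SPARK, GPU }

class ConvOpSketch {
  private ExecType etype = ExecType.SPARK; // whatever the optimizer picked

  private boolean isEligibleForSpark() {
    // Spark convolution stays disabled until rectangular blocks are handled.
    return false;
  }

  ExecType chooseExecType() {
    // Demote remote plans to single-node CP when the op cannot run on Spark.
    etype = (!isEligibleForSpark() && etype == ExecType.SPARK) ? ExecType.CP : etype;
    return etype;
  }
}
```

The second commit's key fix is moving this demotion after the exec-type selection branches, so it applies regardless of the optimization level that chose the initial plan.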
49,732 | 31.03.2017 18:17:47 | 25,200 | 8f7cf77bebc331e98a4020b805eff2da99519498 | Fix a wrong comparison condition
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/QuaternaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/QuaternaryOp.java",
"diff": "@@ -1566,7 +1566,7 @@ public class QuaternaryOp extends Hop implements MultiThreadedHop\n//compare basic inputs and weights (always existing)\nboolean ret = (_op == that2._op\n- && getInput().size() == getInput().size()\n+ && getInput().size() == that2.getInput().size()\n&& getInput().get(0) == that2.getInput().get(0)\n&& getInput().get(1) == that2.getInput().get(1)\n&& getInput().get(2) == that2.getInput().get(2) );\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1449] Fix a wrong comparison condition
Closes #446. |
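The one-character fix above addresses a classic self-comparison bug: `getInput().size() == getInput().size()` is vacuously true, so the arity check could never reject a node with a different number of inputs. A standalone illustration of the bug class and its fix (illustrative names, not SystemML's `QuaternaryOp`):

```java
import java.util.List;

// Illustration of the self-comparison bug fixed in the diff above.
class OpNode {
  List<OpNode> inputs;

  boolean sameBasicInputsBuggy(OpNode that) {
    return inputs.size() == inputs.size()       // BUG: compares a value with itself
        && inputs.get(0) == that.inputs.get(0); // reference identity, as in the original
  }

  boolean sameBasicInputsFixed(OpNode that) {
    return inputs.size() == that.inputs.size()  // compare against the other node
        && inputs.get(0) == that.inputs.get(0);
  }
}
```

Always-true conditions like this are typically caught by static analyzers, which flag them as self-comparisons.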
49,738 | 31.03.2017 17:17:55 | 25,200 | 2e48d951b825fe4ef85dc13f6d69934b8cadfe46 | Extended code generator (replace in rowagg/cell tmpls) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ParameterizedBuiltinOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/ParameterizedBuiltinOp.java",
"diff": "@@ -154,6 +154,11 @@ public class ParameterizedBuiltinOp extends Hop implements MultiThreadedHop\ngetInput().get(_paramIndexMap.get(\"target\")) : null;\n}\n+ public Hop getParameterHop(String name) {\n+ return _paramIndexMap.containsKey(name) ?\n+ getInput().get(_paramIndexMap.get(name)) : null;\n+ }\n+\n@Override\npublic void setMaxNumThreads( int k ) {\n_maxNumThreads = k;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeTernary.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeTernary.java",
"diff": "@@ -28,6 +28,7 @@ public class CNodeTernary extends CNode\n{\npublic enum TernaryType {\nPLUS_MULT, MINUS_MULT,\n+ REPLACE, REPLACE_NAN,\nLOOKUP_RC1;\npublic static boolean contains(String value) {\n@@ -45,6 +46,13 @@ public class CNodeTernary extends CNode\ncase MINUS_MULT:\nreturn \" double %TMP% = %IN1% - %IN2% * %IN3%;\\n\";\n+ case REPLACE:\n+ return \" double %TMP% = (%IN1% == %IN2% || (Double.isNaN(%IN1%) \"\n+ + \"&& Double.isNaN(%IN2%))) ? %IN3% : %IN1%;\\n\";\n+\n+ case REPLACE_NAN:\n+ return \" double %TMP% = Double.isNaN(%IN1%) ? %IN3% : %IN1%;\\n\";\n+\ncase LOOKUP_RC1:\nreturn \" double %TMP% = %IN1%[rowIndex*%IN2%+%IN3%-1];\\n\";\n@@ -101,6 +109,8 @@ public class CNodeTernary extends CNode\nswitch(_type) {\ncase PLUS_MULT: return \"t(+*)\";\ncase MINUS_MULT: return \"t(-*)\";\n+ case REPLACE:\n+ case REPLACE_NAN: return \"t(rplc)\";\ncase LOOKUP_RC1: return \"u(ixrc1)\";\ndefault:\nreturn super.toString();\n@@ -112,6 +122,8 @@ public class CNodeTernary extends CNode\nswitch(_type) {\ncase PLUS_MULT:\ncase MINUS_MULT:\n+ case REPLACE:\n+ case REPLACE_NAN:\ncase LOOKUP_RC1:\n_rows = 0;\n_cols = 0;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java",
"diff": "@@ -34,8 +34,10 @@ import org.apache.sysml.hops.UnaryOp;\nimport org.apache.sysml.hops.Hop.AggOp;\nimport org.apache.sysml.hops.Hop.Direction;\nimport org.apache.sysml.hops.Hop.OpOp2;\n+import org.apache.sysml.hops.Hop.ParamBuiltinOp;\nimport org.apache.sysml.hops.IndexingOp;\nimport org.apache.sysml.hops.LiteralOp;\n+import org.apache.sysml.hops.ParameterizedBuiltinOp;\nimport org.apache.sysml.hops.TernaryOp;\nimport org.apache.sysml.hops.codegen.cplan.CNode;\nimport org.apache.sysml.hops.codegen.cplan.CNodeBinary;\n@@ -157,7 +159,7 @@ public class TemplateCell extends TemplateBase\nelse if( cdata1 instanceof CNodeData && hop.getInput().get(0).getDataType().isMatrix() )\ncdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_RC);\n- String primitiveOpName = ((UnaryOp)hop).getOp().toString();\n+ String primitiveOpName = ((UnaryOp)hop).getOp().name();\nout = new CNodeUnary(cdata1, UnaryType.valueOf(primitiveOpName));\n}\nelse if(hop instanceof BinaryOp)\n@@ -165,7 +167,7 @@ public class TemplateCell extends TemplateBase\nBinaryOp bop = (BinaryOp) hop;\nCNode cdata1 = tmp.get(hop.getInput().get(0).getHopID());\nCNode cdata2 = tmp.get(hop.getInput().get(1).getHopID());\n- String primitiveOpName = bop.getOp().toString();\n+ String primitiveOpName = bop.getOp().name();\n//cdata1 is vector\nif( TemplateUtils.isColVector(cdata1) )\n@@ -207,7 +209,21 @@ public class TemplateCell extends TemplateBase\n//construct ternary cnode, primitive operation derived from OpOp3\nout = new CNodeTernary(cdata1, cdata2, cdata3,\n- TernaryType.valueOf(top.getOp().toString()));\n+ TernaryType.valueOf(top.getOp().name()));\n+ }\n+ else if( hop instanceof ParameterizedBuiltinOp )\n+ {\n+ CNode cdata1 = tmp.get(((ParameterizedBuiltinOp)hop).getTargetHop().getHopID());\n+ if( TemplateUtils.isColVector(cdata1) )\n+ cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_R);\n+ else if( cdata1 instanceof CNodeData && hop.getInput().get(0).getDataType().isMatrix() )\n+ cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_RC);\n+\n+ CNode cdata2 = tmp.get(((ParameterizedBuiltinOp)hop).getParameterHop(\"pattern\").getHopID());\n+ CNode cdata3 = tmp.get(((ParameterizedBuiltinOp)hop).getParameterHop(\"replacement\").getHopID());\n+ TernaryType ttype = (cdata2.isLiteral() && cdata2.getVarname().equals(\"Double.NaN\")) ?\n+ TernaryType.REPLACE_NAN : TernaryType.REPLACE;\n+ out = new CNodeTernary(cdata1, cdata2, cdata3, ttype);\n}\nelse if( hop instanceof IndexingOp )\n{\n@@ -285,7 +301,8 @@ public class TemplateCell extends TemplateBase\n//check supported unary, binary, ternary operations\nreturn hop.getDataType() == DataType.MATRIX && TemplateUtils.isOperationSupported(hop) && (hop instanceof UnaryOp\n|| isBinaryMatrixScalar || isBinaryMatrixVector || isBinaryMatrixMatrixDense\n- || isTernaryVectorScalarVector || isTernaryMatrixScalarMatrixDense);\n+ || isTernaryVectorScalarVector || isTernaryMatrixScalarMatrixDense\n+ || (hop instanceof ParameterizedBuiltinOp && ((ParameterizedBuiltinOp)hop).getOp()==ParamBuiltinOp.REPLACE));\n}\n/**\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRowAgg.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRowAgg.java",
"diff": "@@ -32,6 +32,7 @@ import org.apache.sysml.hops.BinaryOp;\nimport org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.IndexingOp;\nimport org.apache.sysml.hops.LiteralOp;\n+import org.apache.sysml.hops.ParameterizedBuiltinOp;\nimport org.apache.sysml.hops.TernaryOp;\nimport org.apache.sysml.hops.UnaryOp;\nimport org.apache.sysml.hops.codegen.cplan.CNode;\n@@ -78,7 +79,8 @@ public class TemplateRowAgg extends TemplateBase\nreturn !isClosed() &&\n( (hop instanceof BinaryOp && (HopRewriteUtils.isBinaryMatrixColVectorOperation(hop)\n|| HopRewriteUtils.isBinaryMatrixScalarOperation(hop)) )\n- || (hop instanceof UnaryOp && TemplateCell.isValidOperation(hop))\n+ || ((hop instanceof UnaryOp || hop instanceof ParameterizedBuiltinOp)\n+ && TemplateCell.isValidOperation(hop))\n|| (hop instanceof AggUnaryOp && ((AggUnaryOp)hop).getDirection()!=Direction.RowCol)\n|| (hop instanceof AggBinaryOp && hop.getDim1()>1\n&& HopRewriteUtils.isTransposeOperation(hop.getInput().get(0))));\n@@ -255,6 +257,20 @@ public class TemplateRowAgg extends TemplateBase\nout = new CNodeTernary(cdata1, cdata2, cdata3,\nTernaryType.valueOf(top.getOp().toString()));\n}\n+ else if( hop instanceof ParameterizedBuiltinOp )\n+ {\n+ CNode cdata1 = tmp.get(((ParameterizedBuiltinOp)hop).getTargetHop().getHopID());\n+ if( TemplateUtils.isColVector(cdata1) )\n+ cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_R);\n+ else if( cdata1 instanceof CNodeData && hop.getInput().get(0).getDataType().isMatrix() )\n+ cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_RC);\n+\n+ CNode cdata2 = tmp.get(((ParameterizedBuiltinOp)hop).getParameterHop(\"pattern\").getHopID());\n+ CNode cdata3 = tmp.get(((ParameterizedBuiltinOp)hop).getParameterHop(\"replacement\").getHopID());\n+ TernaryType ttype = (cdata2.isLiteral() && cdata2.getVarname().equals(\"Double.NaN\")) ?\n+ TernaryType.REPLACE_NAN : TernaryType.REPLACE;\n+ out = new CNodeTernary(cdata1, cdata2, cdata3, ttype);\n+ }\nelse if( hop instanceof IndexingOp )\n{\nCNode cdata1 = tmp.get(hop.getInput().get(0).getHopID());\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java",
"diff": "@@ -30,6 +30,7 @@ import org.apache.sysml.hops.AggUnaryOp;\nimport org.apache.sysml.hops.BinaryOp;\nimport org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.LiteralOp;\n+import org.apache.sysml.hops.ParameterizedBuiltinOp;\nimport org.apache.sysml.hops.TernaryOp;\nimport org.apache.sysml.hops.Hop.AggOp;\nimport org.apache.sysml.hops.Hop.Direction;\n@@ -105,6 +106,8 @@ public class TemplateUtils\nreturn BinType.contains(((BinaryOp)h).getOp().name());\nelse if(h instanceof TernaryOp)\nreturn TernaryType.contains(((TernaryOp)h).getOp().name());\n+ else if(h instanceof ParameterizedBuiltinOp)\n+ return TernaryType.contains(((ParameterizedBuiltinOp)h).getOp().name());\nreturn false;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java",
"diff": "@@ -46,6 +46,8 @@ public class CellwiseTmplTest extends AutomatedTestBase\nprivate static final String TEST_NAME8 = TEST_NAME+8;\nprivate static final String TEST_NAME9 = TEST_NAME+9; //sum((X + 7 * Y)^2)\nprivate static final String TEST_NAME10 = TEST_NAME+10; //min/max(X + 7 * Y)\n+ private static final String TEST_NAME11 = TEST_NAME+11; //replace((0 / (X - 500))+1, 0/0, 7);\n+\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + CellwiseTmplTest.class.getSimpleName() + \"/\";\n@@ -58,7 +60,7 @@ public class CellwiseTmplTest extends AutomatedTestBase\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- for( int i=1; i<=10; i++ ) {\n+ for( int i=1; i<=11; i++ ) {\naddTestConfiguration( TEST_NAME+i, new TestConfiguration(\nTEST_CLASS_DIR, TEST_NAME+i, new String[] {String.valueOf(i)}) );\n}\n@@ -115,6 +117,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME10, true, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenCellwiseRewrite11() {\n+ testCodegenIntegration( TEST_NAME11, true, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenCellwise1() {\ntestCodegenIntegration( TEST_NAME1, false, ExecType.CP );\n@@ -166,6 +173,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME10, false, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenCellwise11() {\n+ testCodegenIntegration( TEST_NAME11, false, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenCellwiseRewrite1_sp() {\ntestCodegenIntegration( TEST_NAME1, true, ExecType.SPARK );\n@@ -191,6 +203,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME10, true, ExecType.SPARK );\n}\n+ @Test\n+ public void testCodegenCellwiseRewrite11_sp() {\n+ testCodegenIntegration( TEST_NAME11, true, ExecType.SPARK );\n+ }\n+\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\n@@ -247,7 +264,8 @@ public class CellwiseTmplTest extends AutomatedTestBase\nAssert.assertTrue(!heavyHittersContainsSubString(\"tsmm\"));\nelse if( testname.equals(TEST_NAME10) ) //ensure min/max is fused\nAssert.assertTrue(!heavyHittersContainsSubString(\"uamin\",\"uamax\"));\n-\n+ else if( testname.equals(TEST_NAME11) ) //ensure replace is fused\n+ Assert.assertTrue(!heavyHittersContainsSubString(\"replace\"));\n}\nfinally {\nOptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = oldRewrites;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/cellwisetmpl11.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+X = matrix(seq(7, 1006), 500, 2, byrow=TRUE);\n+\n+Y = (0 / (X - 500))+1;\n+R = replace(Y, is.nan(Y), 7);\n+\n+writeMM(as(R,\"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/cellwisetmpl11.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(seq(7, 1006), 500, 2);\n+\n+Y = (0 / (X - 500))+1;\n+R = replace(target=Y, pattern=0/0, replacement=7);\n+\n+write(R, $1)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1447] Extended code generator (replace in rowagg/cell tmpls) |
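The new REPLACE/REPLACE_NAN templates above encode one subtlety of IEEE-754 arithmetic: `NaN == NaN` is false, so matching a NaN pattern requires an explicit `Double.isNaN` check on both operands, and a literal NaN pattern (e.g. `0/0` in the added test script) can be specialized to the cheaper NaN-only test. The sketch below restates exactly the per-cell semantics generated by these templates; the wrapper class and method names are mine, not generated code.

```java
// Per-cell semantics of the REPLACE / REPLACE_NAN codegen templates above.
public class ReplaceSemantics {
  // General replace: NaN == NaN is false, hence the twin isNaN check.
  static double replace(double in, double pattern, double replacement) {
    return (in == pattern || (Double.isNaN(in) && Double.isNaN(pattern)))
        ? replacement : in;
  }

  // Specialization when the pattern literal is NaN: equality can never fire.
  static double replaceNaN(double in, double replacement) {
    return Double.isNaN(in) ? replacement : in;
  }

  public static void main(String[] args) {
    double y = 0.0 / 0.0;                         // NaN, as in the test script
    System.out.println(replace(y, 0.0 / 0.0, 7)); // 7.0 (NaN matched via isNaN)
    System.out.println(replaceNaN(1.5, 7));       // 1.5 (unchanged)
    System.out.println(replace(0.0, 0.0, 7));     // 7.0 (plain equality match)
  }
}
```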
49,772 | 31.03.2017 18:39:19 | 25,200 | ac8ee2befb651ae89c481b63b4a8aa842585f7e4 | [MINOR] Comments and whitespace fixes. | [
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/nn/layers/affine.dml",
"new_path": "scripts/staging/SystemML-NN/nn/layers/affine.dml",
"diff": "#-------------------------------------------------------------\n/*\n- * Fully-connected (affine) layer.\n+ * Affine (fully-connected) layer.\n*/\nforward = function(matrix[double] X, matrix[double] W, matrix[double] b)\nreturn (matrix[double] out) {\n/*\n- * Computes the forward pass for a fully-connected (affine) layer with\n- * M neurons. The input data has N examples, each with D features.\n+ * Computes the forward pass for an affine (fully-connected) layer\n+ * with M neurons. The input data has N examples, each with D\n+ * features.\n*\n* Inputs:\n* - X: Inputs, of shape (N, D).\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/nn/layers/batch_norm.dml",
"new_path": "scripts/staging/SystemML-NN/nn/layers/batch_norm.dml",
"diff": "#-------------------------------------------------------------\n/*\n- * Batch normalization layer.\n+ * Batch Normalization layer.\n*/\nforward = function(matrix[double] X, matrix[double] gamma, matrix[double] beta,\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/nn/layers/cross_entropy_loss.dml",
"new_path": "scripts/staging/SystemML-NN/nn/layers/cross_entropy_loss.dml",
"diff": "#-------------------------------------------------------------\n/*\n- * Cross-entropy loss function.\n+ * Cross-Entropy loss function.\n*/\nforward = function(matrix[double] pred, matrix[double] y)\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/nn/layers/l1_reg.dml",
"new_path": "scripts/staging/SystemML-NN/nn/layers/l1_reg.dml",
"diff": "#-------------------------------------------------------------\n/*\n- * L1 regularizataion.\n+ * L1 regularization.\n*/\nforward = function(matrix[double] X, double lambda)\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/nn/layers/l2_reg.dml",
"new_path": "scripts/staging/SystemML-NN/nn/layers/l2_reg.dml",
"diff": "#-------------------------------------------------------------\n/*\n- * L2 regularizataion.\n+ * L2 regularization.\n*/\nforward = function(matrix[double] X, double lambda)\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/nn/layers/spatial_batch_norm.dml",
"new_path": "scripts/staging/SystemML-NN/nn/layers/spatial_batch_norm.dml",
"diff": "#-------------------------------------------------------------\n/*\n- * Spatial batch normalization layer.\n+ * Spatial Batch Normalization layer.\n*/\nsource(\"nn/util.dml\") as util\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Comments and whitespace fixes. |
49,772 | 31.03.2017 19:24:56 | 25,200 | a6d7aa549738e075a8b1cae40f8020295d5e5867 | [MINOR] Updating MNIST example docs. | [
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/examples/mnist_lenet-predict.dml",
"new_path": "scripts/staging/SystemML-NN/examples/mnist_lenet-predict.dml",
"diff": "# Sample Invocation:\n# Execute using Spark\n# ```\n-# $SPARK_HOME/bin/spark-submit --master local[*] --driver-memory 5G\n+# spark-submit --master local[*] --driver-memory 5G\n# --conf spark.driver.maxResultSize=0 --conf spark.akka.frameSize=128\n# $SYSTEMML_HOME/target/SystemML.jar -f mnist_lenet-predict.dml\n# -nvargs X=data/mnist/images.csv C=1 Hin=28 Win=28\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/examples/mnist_lenet-train.dml",
"new_path": "scripts/staging/SystemML-NN/examples/mnist_lenet-train.dml",
"diff": "#\n# 2. Execute using Spark\n# ```\n-# $SPARK_HOME/bin/spark-submit --master local[*] --driver-memory 10G\n+# spark-submit --master local[*] --driver-memory 10G\n# --conf spark.driver.maxResultSize=0 --conf spark.akka.frameSize=128\n# $SYSTEMML_HOME/target/SystemML.jar -f mnist_lenet-train.dml\n# -nvargs train=data/mnist/mnist_train.csv test=data/mnist/mnist_test.csv\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/examples/mnist_softmax-predict.dml",
"new_path": "scripts/staging/SystemML-NN/examples/mnist_softmax-predict.dml",
"diff": "# Sample Invocation:\n# Execute using Spark\n# ```\n-# $SPARK_HOME/bin/spark-submit --master local[*] --driver-memory 5G\n+# spark-submit --master local[*] --driver-memory 5G\n# --conf spark.driver.maxResultSize=0 --conf spark.akka.frameSize=128\n# $SYSTEMML_HOME/target/SystemML.jar -f mnist_softmax-predict.dml\n# -nvargs X=data/mnist/images.csv model_dir=model/mnist_softmax\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/examples/mnist_softmax-train.dml",
"new_path": "scripts/staging/SystemML-NN/examples/mnist_softmax-train.dml",
"diff": "#\n# 2. Execute using Spark\n# ```\n-# $SPARK_HOME/bin/spark-submit --master local[*] --driver-memory 5G\n+# spark-submit --master local[*] --driver-memory 5G\n# --conf spark.driver.maxResultSize=0 --conf spark.akka.frameSize=128\n# $SYSTEMML_HOME/target/SystemML.jar -f mnist_softmax-train.dml\n# -nvargs train=data/mnist/mnist_train.csv test=data/mnist/mnist_test.csv\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Updating MNIST example docs. |
49,772 | 31.03.2017 20:47:35 | 25,200 | 420dd17bee3adf5569cc848c7f0d62e3923bd769 | Updating Preprocessing Notebook
Updating the breast cancer preprocessing notebook with a new function
for splitting the full DataFrame into train and validation DataFrames. | [
{
"change_type": "MODIFY",
"old_path": "projects/breast_cancer/Preprocessing.ipynb",
"new_path": "projects/breast_cancer/Preprocessing.ipynb",
"diff": "\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false,\n+ \"collapsed\": true,\n\"deletable\": true,\n\"editable\": true\n},\n\"outputs\": [],\n\"source\": [\n- \"def create_ground_truth_maps(folder):\\n\",\n+ \"def get_labels_df(folder):\\n\",\n\" \\\"\\\"\\\"\\n\",\n- \" Create lookup maps for ground truth labels.\\n\",\n+ \" Create a DataFrame with the ground truth labels for each slide.\\n\",\n\" \\n\",\n\" Args:\\n\",\n- \" folder: Directory in which the slides folder is stored, as a string.\\n\",\n- \" This should contain a `training_ground_truth.csv` file.\\n\",\n+ \" folder: Directory containing a `training_ground_truth.csv` file\\n\",\n+ \" containing the ground truth \\\"tumor_score\\\" and \\\"molecular_score\\\"\\n\",\n+ \" labels for each slide.\\n\",\n+ \"\\n\",\n+ \" Returns:\\n\",\n+ \" A Pandas DataFrame containing the ground truth labels for each slide.\\n\",\n\" \\\"\\\"\\\"\\n\",\n\" filename = os.path.join(folder, \\\"training_ground_truth.csv\\\")\\n\",\n\" labels = pd.read_csv(filename, names=[\\\"tumor_score\\\",\\\"molecular_score\\\"], header=None)\\n\",\n\" labels[\\\"slide_num\\\"] = range(1, 501)\\n\",\n+ \" return labels\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"def create_ground_truth_maps(labels_df):\\n\",\n+ \" \\\"\\\"\\\"\\n\",\n+ \" Create lookup maps for ground truth labels.\\n\",\n+ \" \\n\",\n+ \" Args:\\n\",\n+ \" labels_df: A Pandas DataFrame containing the ground truth labels for\\n\",\n+ \" each slide.\\n\",\n\"\\n\",\n+ \" Returns:\\n\",\n+ \" A tuple of dictionaries mapping from the slide number to the\\n\",\n+ \" tumor score and to the molecular score.\\n\",\n+ \" \\\"\\\"\\\"\\n\",\n\" # Create slide_num -> tumor_score, and slide_num -> molecular_score dictionaries.\\n\",\n- \" tumor_score_dict = {int(s): int(l) for s,l in zip(labels.slide_num, labels.tumor_score)}\\n\",\n- \" molecular_score_dict = {int(s): float(l) for s,l in zip(labels.slide_num, labels.molecular_score)}\\n\",\n+ \" tumor_score_dict = {int(s): int(l) for s,l in zip(labels_df.slide_num, labels_df.tumor_score)}\\n\",\n+ \" molecular_score_dict = {int(s): float(l) for s,l in zip(labels_df.slide_num, labels_df.molecular_score)}\\n\",\n\" return tumor_score_dict, molecular_score_dict\"\n]\n},\n\"editable\": true\n},\n\"source\": [\n- \"# Process All Slides Into A Saved Spark DataFrame\"\n+ \"# Process All Slides Into A Spark DataFrame\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false,\n+ \"collapsed\": true,\n\"deletable\": true,\n\"editable\": true\n},\n\" \\n\",\n\" Args:\\n\",\n\" slide_nums: List of whole-slide numbers to process.\\n\",\n- \" folder: Local directory in which the slides folder is stored, as a string.\\n\",\n- \" This should contain either a `training_image_data` folder with\\n\",\n- \" images in the format `TUPAC-TR-###.svs`, or a `testing_image_data`\\n\",\n- \" folder with images in the format `TUPAC-TE-###.svs`.\\n\",\n+ \" folder: Local directory in which the slides folder and ground truth\\n\",\n+ \" file is stored, as a string. 
This should contain a\\n\",\n+ \" `training_image_data` folder with images in the format\\n\",\n+ \" `TUPAC-TR-###.svs`, as well as a `training_ground_truth.csv` file\\n\",\n+ \" containing the ground truth \\\"tumor_score\\\" and \\\"molecular_score\\\"\\n\",\n+ \" labels for each slide. Alternatively, the folder should contain a\\n\",\n+ \" `testing_image_data` folder with images in the format `TUPAC-TE-###.svs`.\\n\",\n\" training: Boolean for training or testing datasets.\\n\",\n\" tile_size: The width and height of a square tile to be generated.\\n\",\n\" overlap: Number of pixels by which to overlap the tiles.\\n\",\n\" filtered_tiles = tiles.filter(lambda tile: keep_tile(tile, tile_size, tissue_threshold))\\n\",\n\" samples = filtered_tiles.flatMap(lambda tile: process_tile(tile, sample_size, grayscale))\\n\",\n\" if training:\\n\",\n- \" tumor_score_dict, molecular_score_dict = create_ground_truth_maps(folder)\\n\",\n+ \" labels_df = get_labels_df(folder)\\n\",\n+ \" tumor_score_dict, molecular_score_dict = create_ground_truth_maps(labels_df)\\n\",\n\" samples_with_labels = (samples.map(\\n\",\n\" lambda tup: (tup[0], tumor_score_dict[tup[0]],\\n\",\n\" molecular_score_dict[tup[0]], Vectors.dense(tup[1]))))\\n\",\n\" df = samples.toDF([\\\"slide_num\\\", \\\"sample\\\"])\\n\",\n\" df = df.select(df.slide_num.astype(\\\"int\\\"), df[\\\"sample\\\"])\\n\",\n\" #df = df.repartition(num_partitions) # Even out the partitions\\n\",\n- \" return df\\n\",\n+ \" return df\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n+ \"source\": [\n+ \"# Split Into Separate Train & Validation DataFrames Based On Slide Number\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"def train_val_split(df, slide_nums, folder, add_row_indices):\\n\",\n+ \" \\\"\\\"\\\"\\n\",\n+ \" Save a preprocessed DataFrame with a constraint on the file sizes.\\n\",\n+ \" \\n\",\n+ \" Args:\\n\",\n+ \" df: A DataFrame.\\n\",\n+ \" slide_nums: A list of slide numbers to sample from.\\n\",\n+ \" folder: Directory containing a `training_ground_truth.csv` file\\n\",\n+ \" containing the ground truth \\\"tumor_score\\\" and \\\"molecular_score\\\"\\n\",\n+ \" labels for each slide.\\n\",\n+ \" add_row_indices: Boolean for whether or not to prepend an index\\n\",\n+ \" column contain the row index for use downstream by SystemML.\\n\",\n+ \" The column name will be \\\"__INDEX\\\".\\n\",\n\" \\n\",\n+ \" sample_size: The width and height of the square samples.\\n\",\n+ \" grayscale: Whether or not to the samples are in grayscale format, rather\\n\",\n+ \" than RGB.\\n\",\n+ \" folder: HDFS directory in which to save the DataFrame.\\n\",\n+ \" mode: Specifies the behavior of `df.write.mode` when the data already exists.\\n\",\n+ \" Options include:\\n\",\n+ \" * `append`: Append contents of this :class:`DataFrame` to existing data.\\n\",\n+ \" * `overwrite`: Overwrite existing data.\\n\",\n+ \" * `error`: Throw an exception if data already exists.\\n\",\n+ \" * `ignore`: Silently ignore this operation if data already exists.\\n\",\n+ \" format: The format in which to save the DataFrame.\\n\",\n+ \" file_size: Size in MB of each saved file. 
128 MB is an empirically ideal size.\\n\",\n+ \" \\n\",\n+ \" Returns:\\n\",\n+ \" A DataFrame in which each row contains the slide number, tumor score,\\n\",\n+ \" molecular score, and the sample stretched out into a Vector.\\n\",\n+ \" \\\"\\\"\\\"\\n\",\n+ \" # Create DataFrame of labels.\\n\",\n+ \" labels_df = get_labels_df(folder)\\n\",\n+ \"\\n\",\n+ \" # Create DataFrames of the slide numbers being used (i.e. without the broken ones)\\n\",\n+ \" # and merge with labels.\\n\",\n+ \" slide_nums_df = pd.DataFrame(slide_nums, columns=[\\\"slide_num\\\"])\\n\",\n+ \" labeled_slide_nums_df = pd.merge(slide_nums_df, labels_df, how=\\\"inner\\\", on=\\\"slide_num\\\")\\n\",\n+ \"\\n\",\n+ \" # DEBUG: Examine class distribution.\\n\",\n+ \"# for pdf in [labels_df, labeled_slide_nums_df]:\\n\",\n+ \"# print(pdf.count())\\n\",\n+ \"# print(pdf[\\\"tumor_score\\\"].value_counts(sort=False))\\n\",\n+ \"# print(pdf[\\\"tumor_score\\\"].value_counts(normalize=True, sort=False))\\n\",\n+ \"# print()\\n\",\n+ \" \\n\",\n+ \" # Randomly split slides 80%/20% into train and validation sets.\\n\",\n+ \" train_nums_df = labeled_slide_nums_df.sample(frac=0.8, random_state=24)\\n\",\n+ \" val_nums_df = labeled_slide_nums_df.drop(train_nums_df.index)\\n\",\n+ \"\\n\",\n+ \" train_nums = (spark.createDataFrame(train_nums_df)\\n\",\n+ \" .selectExpr(\\\"cast(slide_num as int)\\\")\\n\",\n+ \" .coalesce(1))\\n\",\n+ \" val_nums = (spark.createDataFrame(val_nums_df)\\n\",\n+ \" .selectExpr(\\\"cast(slide_num as int)\\\")\\n\",\n+ \" .coalesce(1))\\n\",\n+ \"\\n\",\n+ \" # Note: Explicitly mark the smaller DataFrames as able to be broadcasted\\n\",\n+ \" # in order to have Catalyst choose the more efficient BroadcastHashJoin, \\n\",\n+ \" # rather than the costly SortMergeJoin.\\n\",\n+ \" train = df.join(F.broadcast(train_nums), on=\\\"slide_num\\\")\\n\",\n+ \" val = df.join(F.broadcast(val_nums), on=\\\"slide_num\\\")\\n\",\n+ \" \\n\",\n+ \" # DEBUG: Sanity checks.\\n\",\n+ \"# assert len(pd.merge(train_nums_df, val_nums_df, on=\\\"slide_num\\\")) == 0\\n\",\n+ \"# assert train_nums.join(val_nums, on=\\\"slide_num\\\").count() == 0\\n\",\n+ \"# assert train.join(val, on=\\\"slide_num\\\").count() == 0\\n\",\n+ \"# # - Check distributions.\\n\",\n+ \"# for pdf in train_nums_df, val_nums_df:\\n\",\n+ \"# print(pdf.count())\\n\",\n+ \"# print(pdf[\\\"tumor_score\\\"].value_counts(sort=False))\\n\",\n+ \"# print(pdf[\\\"tumor_score\\\"].value_counts(normalize=True, sort=False), \\\"\\\\n\\\")\\n\",\n+ \"# # - Check total number of examples in each.\\n\",\n+ \"# print(train.count(), val.count())\\n\",\n+ \"# # - Check physical plans for broadcast join.\\n\",\n+ \"# print(train.explain(), val.explain())\\n\",\n+ \" \\n\",\n+ \" # Add row indices for use with SystemML.\\n\",\n+ \" if add_row_indices:\\n\",\n+ \" train = (train.rdd\\n\",\n+ \" .zipWithIndex()\\n\",\n+ \" .map(lambda r: (r[1] + 1, *r[0])) # flatten & convert index to 1-based indexing\\n\",\n+ \" .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\\n\",\n+ \" train = train.select(train[\\\"__INDEX\\\"].astype(\\\"int\\\"), train.slide_num.astype(\\\"int\\\"), \\n\",\n+ \" train.tumor_score.astype(\\\"int\\\"), train.molecular_score, train[\\\"sample\\\"])\\n\",\n+ \"\\n\",\n+ \" val = (val.rdd\\n\",\n+ \" .zipWithIndex()\\n\",\n+ \" .map(lambda r: (r[1] + 1, *r[0])) # flatten & convert index to 1-based indexing\\n\",\n+ \" .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\\n\",\n+ \" val = 
val.select(val[\\\"__INDEX\\\"].astype(\\\"int\\\"), val.slide_num.astype(\\\"int\\\"),\\n\",\n+ \" val.tumor_score.astype(\\\"int\\\"), val.molecular_score, val[\\\"sample\\\"])\\n\",\n+ \"\\n\",\n+ \" return train, val\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n+ \"source\": [\n+ \"# Save\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": false,\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n\"def save(df, filename, sample_size=256, grayscale=False, folder=\\\"data\\\",\\n\",\n\" mode=\\\"error\\\", format=\\\"parquet\\\", file_size=128):\\n\",\n\" \\\"\\\"\\\"\\n\",\n\"sample_size = 256\\n\",\n\"grayscale = False\\n\",\n\"num_partitions = 20000\\n\",\n+ \"add_row_indices = True\\n\",\n\"folder = \\\"/home/MDM/breast_cancer/data\\\"\\n\",\n\"filename = \\\"samples_{}_{}{}.parquet\\\".format(\\n\",\n\" \\\"labels\\\" if training else \\\"testing\\\", sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n\"save(df, filename, sample_size, grayscale)\"\n]\n},\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {\n- \"deletable\": true,\n- \"editable\": true\n- },\n- \"source\": [\n- \"# Split Into Separate Train & Validation DataFrames Based On Slide Number\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {\n- \"deletable\": true,\n- \"editable\": true\n- },\n- \"source\": [\n- \"### TODO: Wrap this in a function with appropriate default arguments\"\n- ]\n- },\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"df = spark.read.load(filepath)\"\n]\n},\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": false,\n- \"deletable\": true,\n- \"editable\": true,\n- \"scrolled\": false\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"# Create DataFrame of labels.\\n\",\n- \"labels = pd.read_csv(\\n\",\n- \" \\\"data/training_ground_truth.csv\\\", names=[\\\"tumor_score\\\",\\\"molecular_score\\\"], header=None)\\n\",\n- \"labels[\\\"slide_num\\\"] = range(1, 501) # add slide num column\\n\",\n- \"\\n\",\n- \"# Create DataFrames of the slide numbers being used (i.e. 
without the broken ones)\\n\",\n- \"# and merge with labels.\\n\",\n- \"slide_nums_df = pd.DataFrame(slide_nums, columns=[\\\"slide_num\\\"])\\n\",\n- \"labeled_slide_nums_df = pd.merge(slide_nums_df, labels, how=\\\"inner\\\", on=\\\"slide_num\\\")\\n\",\n- \"\\n\",\n- \"# Examine class distribution.\\n\",\n- \"for pdf in [labels, labeled_slide_nums_df]:\\n\",\n- \" print(pdf.count())\\n\",\n- \" print(pdf[\\\"tumor_score\\\"].value_counts(sort=False))\\n\",\n- \" print(pdf[\\\"tumor_score\\\"].value_counts(normalize=True, sort=False))\\n\",\n- \" print()\"\n- ]\n- },\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n},\n\"outputs\": [],\n\"source\": [\n- \"# Randomly split slides 80%/20% into train and validation sets.\\n\",\n- \"train_nums_pdf = labeled_slide_nums_df.sample(frac=0.8, random_state=24)\\n\",\n- \"val_nums_pdf = labeled_slide_nums_df.drop(train_nums_pdf.index)\\n\",\n- \"\\n\",\n- \"train_nums = (spark.createDataFrame(train_nums_pdf)\\n\",\n- \" .selectExpr(\\\"cast(slide_num as int)\\\")\\n\",\n- \" .coalesce(1))\\n\",\n- \"val_nums = (spark.createDataFrame(val_nums_pdf)\\n\",\n- \" .selectExpr(\\\"cast(slide_num as int)\\\")\\n\",\n- \" .coalesce(1))\\n\",\n- \"\\n\",\n- \"# Note: Explicitly mark the smaller DataFrames as able to be broadcasted\\n\",\n- \"# in order to have Catalyst choose the more efficient BroadcastHashJoin, \\n\",\n- \"# rather than the costly SortMergeJoin.\\n\",\n- \"train = df.join(F.broadcast(train_nums), on=\\\"slide_num\\\")\\n\",\n- \"val = df.join(F.broadcast(val_nums), on=\\\"slide_num\\\")\\n\",\n- \"\\n\",\n- \"train, val\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": false,\n- \"deletable\": true,\n- \"editable\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"# Sanity checks.\\n\",\n- \"assert len(pd.merge(train_nums_pdf, val_nums_pdf, on=\\\"slide_num\\\")) == 0\\n\",\n- \"assert train_nums.join(val_nums, on=\\\"slide_num\\\").count() == 0\\n\",\n- \"assert train.join(val, on=\\\"slide_num\\\").count() == 0\\n\",\n- \"\\n\",\n- \"# Check distributions.\\n\",\n- \"for pdf in train_nums_pdf, val_nums_pdf:\\n\",\n- \" print(pdf.count())\\n\",\n- \" print(pdf[\\\"tumor_score\\\"].value_counts(sort=False))\\n\",\n- \" print(pdf[\\\"tumor_score\\\"].value_counts(normalize=True, sort=False), \\\"\\\\n\\\")\\n\",\n- \"\\n\",\n- \"# Check total number of examples in each.\\n\",\n- \"print(train.count(), val.count())\\n\",\n- \"\\n\",\n- \"# Check physical plans for broadcast join.\\n\",\n- \"print(train.explain(), val.explain())\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": false,\n- \"deletable\": true,\n- \"editable\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"# Add row indices for use with SystemML.\\n\",\n- \"# TODO: Wrap this in a function with appropriate default arguments.\\n\",\n- \"train = (train.rdd\\n\",\n- \" .zipWithIndex()\\n\",\n- \" .map(lambda r: (r[1] + 1, *r[0])) # flatten & convert index to 1-based indexing\\n\",\n- \" .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\\n\",\n- \"train = train.select(train[\\\"__INDEX\\\"].astype(\\\"int\\\"), train.slide_num.astype(\\\"int\\\"), \\n\",\n- \" train.tumor_score.astype(\\\"int\\\"), train.molecular_score, train[\\\"sample\\\"])\\n\",\n- \"\\n\",\n- \"val = (val.rdd\\n\",\n- \" .zipWithIndex()\\n\",\n- \" .map(lambda r: (r[1] + 1, *r[0])) # flatten & convert index to 1-based indexing\\n\",\n- \" 
.toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\\n\",\n- \"val = val.select(val[\\\"__INDEX\\\"].astype(\\\"int\\\"), val.slide_num.astype(\\\"int\\\"),\\n\",\n- \" val.tumor_score.astype(\\\"int\\\"), val.molecular_score, val[\\\"sample\\\"])\\n\",\n- \"\\n\",\n- \"train, val\"\n+ \"# Split into train and validation DataFrames based On slide number\\n\",\n+ \"train, val = train_val_split(df, slide_nums, folder, add_row_indices)\"\n]\n},\n{\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1185] Updating Preprocessing Notebook
Updating the breast cancer preprocessing notebook with a new function
for splitting the full DataFrame into train and validation DataFrames. |
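The notebook diff above leans on Spark's broadcast-join hint: the slide-number DataFrames are tiny, so marking them broadcastable lets Catalyst choose a BroadcastHashJoin over a shuffle-heavy SortMergeJoin. The same idea in Spark's Java API, as a hedged sketch with assumed column and variable names rather than the notebook's PySpark code:

```java
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import static org.apache.spark.sql.functions.broadcast;

// Sketch: restrict a large samples table to a small set of slide numbers.
// broadcast() is a planner hint, mirroring pyspark.sql.functions.broadcast.
public class BroadcastJoinSketch {
  public static Dataset<Row> filterBySlides(Dataset<Row> samples, Dataset<Row> slideNums) {
    return samples.join(broadcast(slideNums), "slide_num");
  }
}
```

Calling `.explain()` on the result, as the notebook's debug checks do, should show a BroadcastHashJoin in the physical plan when the hint is honored.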
49,768 | 03.04.2017 14:12:03 | 25,200 | c8b71564edd99ef4dc8c4ff52ca75986ca617db4 | [maven-release-plugin] prepare release v0.14.0-incubating-rc1 | [
{
"change_type": "MODIFY",
"old_path": "pom.xml",
"new_path": "pom.xml",
"diff": "<version>18</version>\n</parent>\n<groupId>org.apache.systemml</groupId>\n- <version>0.14.0-incubating-SNAPSHOT</version>\n+ <version>0.14.0-incubating</version>\n<artifactId>systemml</artifactId>\n<packaging>jar</packaging>\n<name>SystemML</name>\n<connection>scm:git:git@github.com:apache/incubator-systemml</connection>\n<developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-systemml</developerConnection>\n<url>https://git-wip-us.apache.org/repos/asf?p=incubator-systemml.git</url>\n- <tag>HEAD</tag>\n+ <tag>v0.14.0-incubating-rc1</tag>\n</scm>\n<issueManagement>\n<system>JIRA</system>\n"
}
] | Java | Apache License 2.0 | apache/systemds | [maven-release-plugin] prepare release v0.14.0-incubating-rc1 |
49,772 | 03.04.2017 14:14:07 | 25,200 | b18908ad7d57199ddb78aef30eff932e16dec22e | Updating Preprocessing Notebook
Updates to the Preprocessing notebook in prep for moving the
preprocessing code to a library. | [
{
"change_type": "MODIFY",
"old_path": "projects/breast_cancer/Preprocessing.ipynb",
"new_path": "projects/breast_cancer/Preprocessing.ipynb",
"diff": "\" return samples\"\n]\n},\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {\n- \"deletable\": true,\n- \"editable\": true\n- },\n- \"source\": [\n- \"# Visualize Tile\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true,\n- \"deletable\": true,\n- \"editable\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"def visualize_tile(tile):\\n\",\n- \" \\\"\\\"\\\"\\n\",\n- \" Plot a tissue tile.\\n\",\n- \" \\n\",\n- \" Args:\\n\",\n- \" tile: A 3D NumPy array of shape (tile_size, tile_size, channels).\\n\",\n- \" \\n\",\n- \" Returns:\\n\",\n- \" None\\n\",\n- \" \\\"\\\"\\\"\\n\",\n- \" plt.imshow(tile)\\n\",\n- \" plt.show()\"\n- ]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {\n- \"deletable\": true,\n- \"editable\": true\n- },\n- \"source\": [\n- \"# Visualize Sample\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true,\n- \"deletable\": true,\n- \"editable\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"def visualize_sample(sample, size=256):\\n\",\n- \" \\\"\\\"\\\"\\n\",\n- \" Plot a tissue sample.\\n\",\n- \" \\n\",\n- \" Args:\\n\",\n- \" sample: A square sample flattened to a vector of size\\n\",\n- \" (channels*size_x*size_y).\\n\",\n- \" size: The width and height of the square samples.\\n\",\n- \" \\n\",\n- \" Returns:\\n\",\n- \" None\\n\",\n- \" \\\"\\\"\\\"\\n\",\n- \" # Change type, reshape, transpose to (size_x, size_y, channels).\\n\",\n- \" length = sample.shape[0]\\n\",\n- \" channels = int(length / (size * size))\\n\",\n- \" if channels > 1:\\n\",\n- \" sample = sample.astype('uint8').reshape((channels, size, size)).transpose(1,2,0)\\n\",\n- \" plt.imshow(sample)\\n\",\n- \" else:\\n\",\n- \" vmax = 255 if sample.max() > 1 else 1\\n\",\n- \" sample = sample.reshape((size, size))\\n\",\n- \" plt.imshow(sample, cmap=\\\"gray\\\", vmin=0, vmax=vmax)\\n\",\n- \" plt.show()\"\n- ]\n- },\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {\n\" Returns:\\n\",\n\" A Pandas DataFrame containing the ground truth labels for each slide.\\n\",\n\" \\\"\\\"\\\"\\n\",\n- \" filename = os.path.join(folder, \\\"training_ground_truth.csv\\\")\\n\",\n- \" labels = pd.read_csv(filename, names=[\\\"tumor_score\\\",\\\"molecular_score\\\"], header=None)\\n\",\n- \" labels[\\\"slide_num\\\"] = range(1, 501)\\n\",\n- \" return labels\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": false,\n- \"deletable\": true,\n- \"editable\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"def create_ground_truth_maps(labels_df):\\n\",\n- \" \\\"\\\"\\\"\\n\",\n- \" Create lookup maps for ground truth labels.\\n\",\n- \" \\n\",\n- \" Args:\\n\",\n- \" labels_df: A Pandas DataFrame containing the ground truth labels for\\n\",\n- \" each slide.\\n\",\n- \"\\n\",\n- \" Returns:\\n\",\n- \" A tuple of dictionaries mapping from the slide number to the\\n\",\n- \" tumor score and to the molecular score.\\n\",\n- \" \\\"\\\"\\\"\\n\",\n- \" # Create slide_num -> tumor_score, and slide_num -> molecular_score dictionaries.\\n\",\n- \" tumor_score_dict = {int(s): int(l) for s,l in zip(labels_df.slide_num, labels_df.tumor_score)}\\n\",\n- \" molecular_score_dict = {int(s): float(l) for s,l in zip(labels_df.slide_num, labels_df.molecular_score)}\\n\",\n- \" return tumor_score_dict, molecular_score_dict\"\n+ \" filepath = os.path.join(folder, \\\"training_ground_truth.csv\\\")\\n\",\n+ 
\" labels_df = pd.read_csv(filepath, names=[\\\"tumor_score\\\", \\\"molecular_score\\\"], header=None)\\n\",\n+ \" labels_df[\\\"slide_num\\\"] = labels_df.index + 1 # slide numbering starts at 1\\n\",\n+ \" labels_df.set_index(\\\"slide_num\\\", drop=False, inplace=True) # use the slide num as index\\n\",\n+ \" return labels_df\"\n]\n},\n{\n\" molecular score, and the sample stretched out into a Vector.\\n\",\n\" \\\"\\\"\\\"\\n\",\n\" slides = sc.parallelize(slide_nums)\\n\",\n- \" # Force even partitioning by collecting and parallelizing -- for memory issues.\\n\",\n- \" # TODO: Explore computing the ideal paritition sizes based on projected number\\n\",\n- \" # of tiles after filtering.\\n\",\n- \" ## HACK Note: This was a PySpark bug with a fix in the master branch now.\\n\",\n- \" #tile_indices = slides.flatMap(\\n\",\n- \" # lambda slide: process_slide(slide, folder, training, tile_size, overlap)).collect()\\n\",\n- \" #tile_indices = sc.parallelize(tile_indices, num_partitions)\\n\",\n- \" ## END HACK -- update later\\n\",\n+ \" # Create DataFrame of all tile locations and increase number of partitions\\n\",\n+ \" # to avoid OOM during subsequent processing.\\n\",\n\" tile_indices = (slides.flatMap(\\n\",\n\" lambda slide: process_slide(slide, folder, training, tile_size, overlap)))\\n\",\n+ \" # TODO: Explore computing the ideal paritition sizes based on projected number\\n\",\n+ \" # of tiles after filtering. I.e. something like the following:\\n\",\n+ \" #rows = tile_indices.count()\\n\",\n+ \" #part_size = 128\\n\",\n+ \" #channels = 1 if grayscale else 3\\n\",\n+ \" #row_mb = tile_size * tile_size * channels * 8 / 1024 / 1024 # size of one row in MB\\n\",\n+ \" #rows_per_part = round(part_size / row_mb)\\n\",\n+ \" #num_parts = rows / rows_per_part\\n\",\n+ \" ## HACK: Force even partitioning by collecting and parallelizing -- for memory issues.\\n\",\n+ \" ## Note: This was a PySpark bug with a fix in the master branch now.\\n\",\n+ \" #tile_indices = tile_indices.collect()\\n\",\n+ \" #tile_indices = sc.parallelize(tile_indices, num_partitions)\\n\",\n+ \" ## END HACK\\n\",\n\" tile_indices = tile_indices.repartition(num_partitions)\\n\",\n\" tile_indices.cache()\\n\",\n+ \" # Extract all tiles into a DataFrame, filter, and cut into smaller samples.\\n\",\n\" tiles = tile_indices.map(lambda tile_index: process_tile_index(tile_index, folder, training))\\n\",\n\" filtered_tiles = tiles.filter(lambda tile: keep_tile(tile, tile_size, tissue_threshold))\\n\",\n\" samples = filtered_tiles.flatMap(lambda tile: process_tile(tile, sample_size, grayscale))\\n\",\n\" if training:\\n\",\n+ \" # Append labels\\n\",\n\" labels_df = get_labels_df(folder)\\n\",\n- \" tumor_score_dict, molecular_score_dict = create_ground_truth_maps(labels_df)\\n\",\n\" samples_with_labels = (samples.map(\\n\",\n- \" lambda tup: (tup[0], tumor_score_dict[tup[0]],\\n\",\n- \" molecular_score_dict[tup[0]], Vectors.dense(tup[1]))))\\n\",\n+ \" lambda tup: (tup[0], int(labels_df.at[tup[0],\\\"tumor_score\\\"]),\\n\",\n+ \" float(labels_df.at[tup[0],\\\"molecular_score\\\"]), Vectors.dense(tup[1]))))\\n\",\n\" df = samples_with_labels.toDF([\\\"slide_num\\\", \\\"tumor_score\\\", \\\"molecular_score\\\", \\\"sample\\\"])\\n\",\n\" df = df.select(df.slide_num.astype(\\\"int\\\"), df.tumor_score.astype(\\\"int\\\"),\\n\",\n\" df.molecular_score, df[\\\"sample\\\"])\\n\",\n\" else: # testing data -- no labels\\n\",\n\" df = samples.toDF([\\\"slide_num\\\", \\\"sample\\\"])\\n\",\n\" df = 
df.select(df.slide_num.astype(\\\"int\\\"), df[\\\"sample\\\"])\\n\",\n- \" #df = df.repartition(num_partitions) # Even out the partitions\\n\",\n+ \" #df = df.repartition(num_partitions) # HACK: Even out the partitions to avoid issues during saving\\n\",\n\" return df\"\n]\n},\n},\n\"outputs\": [],\n\"source\": [\n- \"def train_val_split(df, slide_nums, folder, add_row_indices):\\n\",\n+ \"def train_val_split(df, slide_nums, folder, train_frac=0.8, add_row_indices=True, seed=None, debug=False):\\n\",\n\" \\\"\\\"\\\"\\n\",\n- \" Save a preprocessed DataFrame with a constraint on the file sizes.\\n\",\n+ \" Split a DataFrame of slide samples into training and validation sets.\\n\",\n\" \\n\",\n\" Args:\\n\",\n- \" df: A DataFrame.\\n\",\n+ \" df: A DataFrame in which each row contains the slide number,\\n\",\n+ \" tumor score, molecular score, and the sample stretched out into\\n\",\n+ \" a Vector.\\n\",\n\" slide_nums: A list of slide numbers to sample from.\\n\",\n\" folder: Directory containing a `training_ground_truth.csv` file\\n\",\n\" containing the ground truth \\\"tumor_score\\\" and \\\"molecular_score\\\"\\n\",\n\" labels for each slide.\\n\",\n+ \" train_frac: Fraction of the data to assign to the training set, with\\n\",\n+ \" `1-frac` assigned to the valiation set.\\n\",\n\" add_row_indices: Boolean for whether or not to prepend an index\\n\",\n\" column contain the row index for use downstream by SystemML.\\n\",\n\" The column name will be \\\"__INDEX\\\".\\n\",\n\" \\n\",\n- \" sample_size: The width and height of the square samples.\\n\",\n- \" grayscale: Whether or not to the samples are in grayscale format, rather\\n\",\n- \" than RGB.\\n\",\n- \" folder: HDFS directory in which to save the DataFrame.\\n\",\n- \" mode: Specifies the behavior of `df.write.mode` when the data already exists.\\n\",\n- \" Options include:\\n\",\n- \" * `append`: Append contents of this :class:`DataFrame` to existing data.\\n\",\n- \" * `overwrite`: Overwrite existing data.\\n\",\n- \" * `error`: Throw an exception if data already exists.\\n\",\n- \" * `ignore`: Silently ignore this operation if data already exists.\\n\",\n- \" format: The format in which to save the DataFrame.\\n\",\n- \" file_size: Size in MB of each saved file. 128 MB is an empirically ideal size.\\n\",\n- \" \\n\",\n\" Returns:\\n\",\n\" A DataFrame in which each row contains the slide number, tumor score,\\n\",\n\" molecular score, and the sample stretched out into a Vector.\\n\",\n\" \\\"\\\"\\\"\\n\",\n- \" # Create DataFrame of labels.\\n\",\n+ \" # Create DataFrame of labels for the given slide numbers.\\n\",\n\" labels_df = get_labels_df(folder)\\n\",\n- \"\\n\",\n- \" # Create DataFrames of the slide numbers being used (i.e. 
without the broken ones)\\n\",\n- \" # and merge with labels.\\n\",\n- \" slide_nums_df = pd.DataFrame(slide_nums, columns=[\\\"slide_num\\\"])\\n\",\n- \" labeled_slide_nums_df = pd.merge(slide_nums_df, labels_df, how=\\\"inner\\\", on=\\\"slide_num\\\")\\n\",\n- \"\\n\",\n- \" # DEBUG: Examine class distribution.\\n\",\n- \"# for pdf in [labels_df, labeled_slide_nums_df]:\\n\",\n- \"# print(pdf.count())\\n\",\n- \"# print(pdf[\\\"tumor_score\\\"].value_counts(sort=False))\\n\",\n- \"# print(pdf[\\\"tumor_score\\\"].value_counts(normalize=True, sort=False))\\n\",\n- \"# print()\\n\",\n+ \" labels_df = labels_df.loc[slide_nums]\\n\",\n\" \\n\",\n\" # Randomly split slides 80%/20% into train and validation sets.\\n\",\n- \" train_nums_df = labeled_slide_nums_df.sample(frac=0.8, random_state=24)\\n\",\n- \" val_nums_df = labeled_slide_nums_df.drop(train_nums_df.index)\\n\",\n+ \" train_nums_df = labels_df.sample(frac=train_frac, random_state=seed)\\n\",\n+ \" val_nums_df = labels_df.drop(train_nums_df.index)\\n\",\n\"\\n\",\n\" train_nums = (spark.createDataFrame(train_nums_df)\\n\",\n\" .selectExpr(\\\"cast(slide_num as int)\\\")\\n\",\n\" train = df.join(F.broadcast(train_nums), on=\\\"slide_num\\\")\\n\",\n\" val = df.join(F.broadcast(val_nums), on=\\\"slide_num\\\")\\n\",\n\" \\n\",\n+ \" if debug:\\n\",\n\" # DEBUG: Sanity checks.\\n\",\n- \"# assert len(pd.merge(train_nums_df, val_nums_df, on=\\\"slide_num\\\")) == 0\\n\",\n- \"# assert train_nums.join(val_nums, on=\\\"slide_num\\\").count() == 0\\n\",\n- \"# assert train.join(val, on=\\\"slide_num\\\").count() == 0\\n\",\n- \"# # - Check distributions.\\n\",\n- \"# for pdf in train_nums_df, val_nums_df:\\n\",\n- \"# print(pdf.count())\\n\",\n- \"# print(pdf[\\\"tumor_score\\\"].value_counts(sort=False))\\n\",\n- \"# print(pdf[\\\"tumor_score\\\"].value_counts(normalize=True, sort=False), \\\"\\\\n\\\")\\n\",\n- \"# # - Check total number of examples in each.\\n\",\n- \"# print(train.count(), val.count())\\n\",\n- \"# # - Check physical plans for broadcast join.\\n\",\n- \"# print(train.explain(), val.explain())\\n\",\n+ \" assert len(pd.merge(train_nums_df, val_nums_df, on=\\\"slide_num\\\")) == 0\\n\",\n+ \" assert train_nums.join(val_nums, on=\\\"slide_num\\\").count() == 0\\n\",\n+ \" assert train.join(val, on=\\\"slide_num\\\").count() == 0\\n\",\n+ \" # - Check distributions.\\n\",\n+ \" for pdf in train_nums_df, val_nums_df:\\n\",\n+ \" print(pdf.count())\\n\",\n+ \" print(pdf[\\\"tumor_score\\\"].value_counts(sort=False))\\n\",\n+ \" print(pdf[\\\"tumor_score\\\"].value_counts(normalize=True, sort=False), \\\"\\\\n\\\")\\n\",\n+ \" # - Check total number of examples in each.\\n\",\n+ \" print(train.count(), val.count())\\n\",\n+ \" # - Check physical plans for broadcast join.\\n\",\n+ \" print(train.explain(), val.explain())\\n\",\n\" \\n\",\n\" # Add row indices for use with SystemML.\\n\",\n\" if add_row_indices:\\n\",\n\"editable\": true\n},\n\"source\": [\n- \"# Save\"\n+ \"# Visualize Tile\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"def visualize_tile(tile):\\n\",\n+ \" \\\"\\\"\\\"\\n\",\n+ \" Plot a tissue tile.\\n\",\n+ \" \\n\",\n+ \" Args:\\n\",\n+ \" tile: A 3D NumPy array of shape (tile_size, tile_size, channels).\\n\",\n+ \" \\n\",\n+ \" Returns:\\n\",\n+ \" None\\n\",\n+ \" \\\"\\\"\\\"\\n\",\n+ \" plt.imshow(tile)\\n\",\n+ \" plt.show()\"\n+ ]\n+ 
},\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n+ \"source\": [\n+ \"# Visualize Sample\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true,\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"def visualize_sample(sample, size=256):\\n\",\n+ \" \\\"\\\"\\\"\\n\",\n+ \" Plot a tissue sample.\\n\",\n+ \" \\n\",\n+ \" Args:\\n\",\n+ \" sample: A square sample flattened to a vector of size\\n\",\n+ \" (channels*size_x*size_y).\\n\",\n+ \" size: The width and height of the square samples.\\n\",\n+ \" \\n\",\n+ \" Returns:\\n\",\n+ \" None\\n\",\n+ \" \\\"\\\"\\\"\\n\",\n+ \" # Change type, reshape, transpose to (size_x, size_y, channels).\\n\",\n+ \" length = sample.shape[0]\\n\",\n+ \" channels = int(length / (size * size))\\n\",\n+ \" if channels > 1:\\n\",\n+ \" sample = sample.astype('uint8').reshape((channels, size, size)).transpose(1,2,0)\\n\",\n+ \" plt.imshow(sample)\\n\",\n+ \" else:\\n\",\n+ \" vmax = 255 if sample.max() > 1 else 1\\n\",\n+ \" sample = sample.reshape((size, size))\\n\",\n+ \" plt.imshow(sample, cmap=\\\"gray\\\", vmin=0, vmax=vmax)\\n\",\n+ \" plt.show()\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {\n+ \"deletable\": true,\n+ \"editable\": true\n+ },\n+ \"source\": [\n+ \"# Save DataFrame\"\n]\n},\n{\n},\n\"outputs\": [],\n\"source\": [\n- \"def save(df, filename, sample_size=256, grayscale=False, folder=\\\"data\\\",\\n\",\n- \" mode=\\\"error\\\", format=\\\"parquet\\\", file_size=128):\\n\",\n+ \"def save(df, filepath, sample_size, grayscale, mode=\\\"error\\\", format=\\\"parquet\\\", file_size=128):\\n\",\n\" \\\"\\\"\\\"\\n\",\n\" Save a preprocessed DataFrame with a constraint on the file sizes.\\n\",\n\" \\n\",\n\" Args:\\n\",\n\" df: A DataFrame.\\n\",\n- \" filename: Name of the file to save.\\n\",\n+ \" filepath: Hadoop-supported path at which to save the DataFrame.\\n\",\n\" sample_size: The width and height of the square samples.\\n\",\n\" grayscale: Whether or not to the samples are in grayscale format, rather\\n\",\n\" than RGB.\\n\",\n- \" folder: HDFS directory in which to save the DataFrame.\\n\",\n\" mode: Specifies the behavior of `df.write.mode` when the data already exists.\\n\",\n\" Options include:\\n\",\n\" * `append`: Append contents of this :class:`DataFrame` to existing data.\\n\",\n\" channels = 1 if grayscale else 3\\n\",\n\" row_mb = sample_size * sample_size * channels * 8 / 1024 / 1024 # size of one row in MB\\n\",\n\" rows_per_file = round(file_size / row_mb)\\n\",\n- \" filepath = os.path.join(folder, filename)\\n\",\n\" df.write.option(\\\"maxRecordsPerFile\\\", rows_per_file).mode(mode).save(filepath, format=format)\"\n]\n},\n\"sample_size = 256\\n\",\n\"grayscale = False\\n\",\n\"num_partitions = 20000\\n\",\n- \"add_row_indices = True\\n\",\n+ \"add_row_indices = False #True\\n\",\n+ \"train_frac = 0.8\\n\",\n\"folder = \\\"/home/MDM/breast_cancer/data\\\"\\n\",\n- \"filename = \\\"samples_{}_{}{}.parquet\\\".format(\\n\",\n- \" \\\"labels\\\" if training else \\\"testing\\\", sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n- \"tr_filename = \\\"train_{}{}.parquet\\\".format(sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n- \"val_filename = \\\"val_{}{}.parquet\\\".format(sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\"\n+ \"# labels_df_path = os.path.join(folder, 
\\\"training_ground_truth.csv\\\")\\n\",\n+ \"save_folder = \\\"data\\\" # Hadoop-supported directory in which to save DataFrames\\n\",\n+ \"df_path = os.path.join(save_folder, \\\"samples_{}_{}{}.parquet\\\".format(\\n\",\n+ \" \\\"labels\\\" if training else \\\"testing\\\", sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n+ \"train_df_path = os.path.join(save_folder, \\\"train_{}{}.parquet\\\".format(sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n+ \"val_df_path = os.path.join(save_folder, \\\"val_{}{}.parquet\\\".format(sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\"\n]\n},\n{\n\"source\": [\n\"# Process all slides.\\n\",\n\"df = preprocess(slide_nums, tile_size=tile_size, sample_size=sample_size, grayscale=grayscale,\\n\",\n- \" training=training, num_partitions=num_partitions, folder=folder)\\n\",\n- \"df\"\n+ \" training=training, num_partitions=num_partitions, folder=folder)\"\n]\n},\n{\n\"outputs\": [],\n\"source\": [\n\"# Save DataFrame of samples.\\n\",\n- \"save(df, filename, sample_size, grayscale)\"\n+ \"save(df, df_path, sample_size, grayscale)\"\n]\n},\n{\n\"outputs\": [],\n\"source\": [\n\"# Load full DataFrame from disk.\\n\",\n- \"filepath = os.path.join(\\\"data\\\", filename)\\n\",\n- \"df = spark.read.load(filepath)\"\n+ \"df = spark.read.load(df_path)\"\n]\n},\n{\n\"outputs\": [],\n\"source\": [\n\"# Split into train and validation DataFrames based On slide number\\n\",\n- \"train, val = train_val_split(df, slide_nums, folder, add_row_indices)\"\n+ \"train, val = train_val_split(df, slide_nums, folder, train_frac, add_row_indices)\"\n]\n},\n{\n\"outputs\": [],\n\"source\": [\n\"# Save train and validation DataFrames.\\n\",\n- \"save(train, tr_filename, sample_size, grayscale)\\n\",\n- \"save(val, val_filename, sample_size, grayscale)\"\n+ \"save(train, train_df_path, sample_size, grayscale)\\n\",\n+ \"save(val, val_df_path, sample_size, grayscale)\"\n]\n},\n{\n\"outputs\": [],\n\"source\": [\n\"# Load train and validation DataFrames from disk.\\n\",\n- \"train = spark.read.load(os.path.join(\\\"data\\\", tr_filename))\\n\",\n- \"val = spark.read.load(os.path.join(\\\"data\\\", val_filename))\"\n+ \"train = spark.read.load(train_df_path)\\n\",\n+ \"val = spark.read.load(val_df_path)\"\n]\n},\n{\n\"# Save train and validation DataFrames.\\n\",\n\"tr_sample_filename = \\\"train_{}_sample_{}{}.parquet\\\".format(p, sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n\"val_sample_filename = \\\"val_{}_sample_{}{}.parquet\\\".format(p, sample_size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n- \"save(train_sample, tr_sample_filename, sample_size, grayscale)\\n\",\n- \"save(val_sample, val_sample_filename, sample_size, grayscale)\"\n+ \"train_sample_path = os.path.join(\\\"save_folder\\\", tr_sample_filename)\\n\",\n+ \"val_sample_path = os.path.join(\\\"save_folder\\\", val_sample_filename)\\n\",\n+ \"save(train_sample, train_sample_path, sample_size, grayscale)\\n\",\n+ \"save(val_sample, val_sample_path, sample_size, grayscale)\"\n]\n}\n],\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1185] Updating Preprocessing Notebook
Updates to the Preprocessing notebook in prep for moving the
preprocessing code to a library. |
49,738 | 03.04.2017 14:28:25 | 25,200 | 933824ea9389193ab45851f2b5d1ae3e76f760b1 | Fix inconsistencies generated code vs built-in functions
This patch fixes various unary and binary codegen templates to use
exactly the same semantics as existing built-in functions. Note that this fixes
result correctness issues of integer divide and modulus. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeBinary.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeBinary.java",
"diff": "@@ -94,9 +94,9 @@ public class CNodeBinary extends CNode\ncase MINUS:\nreturn \" double %TMP% = %IN1% - %IN2%;\\n\" ;\ncase MODULUS:\n- return \" double %TMP% = %IN1% % %IN2%;\\n\" ;\n+ return \" double %TMP% = LibSpoofPrimitives.mod(%IN1%, %IN2%);\\n\" ;\ncase INTDIV:\n- return \" double %TMP% = (int) %IN1% / %IN2%;\\n\" ;\n+ return \" double %TMP% = LibSpoofPrimitives.intDiv(%IN1%, %IN2%);\\n\" ;\ncase LESS:\nreturn \" double %TMP% = (%IN1% < %IN2%) ? 1 : 0;\\n\" ;\ncase LESSEQUAL:\n@@ -111,11 +111,11 @@ public class CNodeBinary extends CNode\nreturn \" double %TMP% = (%IN1% != %IN2%) ? 1 : 0;\\n\" ;\ncase MIN:\n- return \" double %TMP% = Math.min(%IN1%, %IN2%);\\n\" ;\n+ return \" double %TMP% = (%IN1% <= %IN2%) ? %IN1% : %IN2%;\\n\" ;\ncase MAX:\n- return \" double %TMP% = Math.max(%IN1%, %IN2%);\\n\" ;\n+ return \" double %TMP% = (%IN1% >= %IN2%) ? %IN1% : %IN2%;\\n\" ;\ncase LOG:\n- return \" double %TMP% = Math.log(%IN1%)/Math.log(%IN2%);\\n\" ;\n+ return \" double %TMP% = FastMath.log(%IN1%)/FastMath.log(%IN2%);\\n\" ;\ncase POW:\nreturn \" double %TMP% = Math.pow(%IN1%, %IN2%);\\n\" ;\ncase MINUS1_MULT:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java",
"diff": "@@ -60,19 +60,19 @@ public class CNodeUnary extends CNode\ncase ABS:\nreturn \" double %TMP% = Math.abs(%IN1%);\\n\";\ncase SIN:\n- return \" double %TMP% = Math.sin(%IN1%);\\n\";\n+ return \" double %TMP% = FastMath.sin(%IN1%);\\n\";\ncase COS:\n- return \" double %TMP% = Math.cos(%IN1%);\\n\";\n+ return \" double %TMP% = FastMath.cos(%IN1%);\\n\";\ncase TAN:\n- return \" double %TMP% = Math.tan(%IN1%);\\n\";\n+ return \" double %TMP% = FastMath.tan(%IN1%);\\n\";\ncase ASIN:\n- return \" double %TMP% = Math.asin(%IN1%);\\n\";\n+ return \" double %TMP% = FastMath.asin(%IN1%);\\n\";\ncase ACOS:\n- return \" double %TMP% = Math.acos(%IN1%);\\n\";\n+ return \" double %TMP% = FastMath.acos(%IN1%);\\n\";\ncase ATAN:\nreturn \" double %TMP% = Math.atan(%IN1%);\\n\";\ncase SIGN:\n- return \" double %TMP% = Math.signum(%IN1%);\\n\";\n+ return \" double %TMP% = FastMath.signum(%IN1%);\\n\";\ncase SQRT:\nreturn \" double %TMP% = Math.sqrt(%IN1%);\\n\";\ncase LOG:\n@@ -80,9 +80,9 @@ public class CNodeUnary extends CNode\ncase ROUND:\nreturn \" double %TMP% = Math.round(%IN1%);\\n\";\ncase CEIL:\n- return \" double %TMP% = Math.ceil(%IN1%);\\n\";\n+ return \" double %TMP% = FastMath.ceil(%IN1%);\\n\";\ncase FLOOR:\n- return \" double %TMP% = Math.floor(%IN1%);\\n\";\n+ return \" double %TMP% = FastMath.floor(%IN1%);\\n\";\ncase SELP:\nreturn \" double %TMP% = (%IN1%>0) ? %IN1% : 0;\\n\";\ncase SPROP:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/codegen/LibSpoofPrimitives.java",
"new_path": "src/main/java/org/apache/sysml/runtime/codegen/LibSpoofPrimitives.java",
"diff": "@@ -22,6 +22,8 @@ package org.apache.sysml.runtime.codegen;\nimport java.util.Arrays;\nimport java.util.LinkedList;\n+import org.apache.sysml.runtime.functionobjects.IntegerDivide;\n+import org.apache.sysml.runtime.functionobjects.Modulus;\nimport org.apache.sysml.runtime.matrix.data.LibMatrixMult;\n/**\n@@ -33,6 +35,9 @@ import org.apache.sysml.runtime.matrix.data.LibMatrixMult;\n*/\npublic class LibSpoofPrimitives\n{\n+ private static IntegerDivide intDiv = IntegerDivide.getFnObject();\n+ private static Modulus mod = Modulus.getFnObject();\n+\n//global pool of reusable vectors, individual operations set up their own thread-local\n//ring buffers of reusable vectors with specific number of vectors and vector sizes\nprivate static ThreadLocal<LinkedList<double[]>> memPool = new ThreadLocal<LinkedList<double[]>>() {\n@@ -312,6 +317,18 @@ public class LibSpoofPrimitives\nreturn c;\n}\n+ //complex builtin functions that are not directly generated\n+ //(included here in order to reduce the number of imports)\n+\n+ public static double intDiv(double in1, double in2) {\n+ return intDiv.execute(in1, in2);\n+ }\n+\n+ public static double mod(double in1, double in2) {\n+ return mod.execute(in1, in2);\n+ }\n+\n+\n//dynamic memory management\npublic static void setupThreadLocalMemory(int numVectors, int len) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java",
"diff": "@@ -46,8 +46,8 @@ public class CellwiseTmplTest extends AutomatedTestBase\nprivate static final String TEST_NAME8 = TEST_NAME+8;\nprivate static final String TEST_NAME9 = TEST_NAME+9; //sum((X + 7 * Y)^2)\nprivate static final String TEST_NAME10 = TEST_NAME+10; //min/max(X + 7 * Y)\n- private static final String TEST_NAME11 = TEST_NAME+11; //replace((0 / (X - 500))+1, 0/0, 7);\n-\n+ private static final String TEST_NAME11 = TEST_NAME+11; //replace((0 / (X - 500))+1, 0/0, 7)\n+ private static final String TEST_NAME12 = TEST_NAME+12; //((X/3) %% 0.6) + ((X/3) %/% 0.6)\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + CellwiseTmplTest.class.getSimpleName() + \"/\";\n@@ -60,7 +60,7 @@ public class CellwiseTmplTest extends AutomatedTestBase\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- for( int i=1; i<=11; i++ ) {\n+ for( int i=1; i<=12; i++ ) {\naddTestConfiguration( TEST_NAME+i, new TestConfiguration(\nTEST_CLASS_DIR, TEST_NAME+i, new String[] {String.valueOf(i)}) );\n}\n@@ -122,6 +122,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME11, true, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenCellwiseRewrite12() {\n+ testCodegenIntegration( TEST_NAME12, true, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenCellwise1() {\ntestCodegenIntegration( TEST_NAME1, false, ExecType.CP );\n@@ -178,6 +183,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME11, false, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenCellwise12() {\n+ testCodegenIntegration( TEST_NAME12, false, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenCellwiseRewrite1_sp() {\ntestCodegenIntegration( TEST_NAME1, true, ExecType.SPARK );\n@@ -208,6 +218,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME11, true, ExecType.SPARK );\n}\n+ @Test\n+ public void testCodegenCellwiseRewrite12_sp() {\n+ testCodegenIntegration( TEST_NAME12, true, ExecType.SPARK );\n+ }\n+\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\nboolean oldRewrites = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/cellwisetmpl12.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+X = matrix(seq(7, 1006), 500, 2, byrow=TRUE);\n+\n+R1 = (X/3) %% 0.6;\n+R2 = (X/3) %/% 0.6;\n+R = R1 + R2;\n+\n+writeMM(as(R,\"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/cellwisetmpl12.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(seq(7, 1006), 500, 2);\n+\n+R1 = (X/3) %% 0.6;\n+R2 = (X/3) %/% 0.6;\n+R = R1 + R2;\n+\n+write(R, $1)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1456] Fix inconsistencies generated code vs built-in functions
This patch fixes various unary and binary codegen templates to use
exactly the same semantics as existing built-in functions. Note that this fixes
result correctness issues of integer divide and modulus. |
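The correctness issue is easy to see in isolation. The sketch below is illustrative, not SystemML code: it contrasts the old generated expressions `(int) %IN1% / %IN2%` and `%IN1% % %IN2%` with R-style floor semantics, which DML's `%/%` and `%%` are assumed here to follow; exact edge cases (e.g., division by zero) are defined by the `IntegerDivide` and `Modulus` function objects that the patched templates delegate to.

```java
public class IntDivModDemo {
    // Floor-based semantics as in R's %/% and %% (illustrative assumption;
    // the patched templates call LibSpoofPrimitives.intDiv/mod instead).
    static double intDiv(double a, double b) { return Math.floor(a / b); }
    static double mod(double a, double b)    { return a - Math.floor(a / b) * b; }

    public static void main(String[] args) {
        // Old template "(int) %IN1% / %IN2%": only the first operand is cast,
        // so the division is still a floating-point division.
        System.out.println((int)  7.5 / 2.0 + " vs " + intDiv( 7.5, 2.0)); // 3.5 vs 3.0
        System.out.println((int) -7.0 / 2.0 + " vs " + intDiv(-7.0, 2.0)); // -3.5 vs -4.0
        // Old template "%IN1% % %IN2%": Java's remainder takes the sign of
        // the dividend, floor-based modulus takes the sign of the divisor.
        System.out.println(-7.0 % 2.0 + " vs " + mod(-7.0, 2.0));          // -1.0 vs 1.0
    }
}
```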
49,738 | 03.04.2017 15:51:43 | 25,200 | 35476195fb8caab5da1e63350b82df3e2b44cf8a | Fix memoization and cse on codegen cplan construction | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java",
"diff": "@@ -136,6 +136,10 @@ public class TemplateCell extends TemplateBase\nprivate void rConstructCplan(Hop hop, CPlanMemoTable memo, HashMap<Long, CNode> tmp, HashSet<Hop> inHops, boolean compileLiterals)\n{\n+ //memoization for common subexpression elimination and to avoid redundant work\n+ if( tmp.containsKey(hop.getHopID()) )\n+ return;\n+\n//recursively process required childs\nMemoTableEntry me = memo.getBest(hop.getHopID(), TemplateType.CellTpl);\nfor( int i=0; i<hop.getInput().size(); i++ ) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateOuterProduct.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateOuterProduct.java",
"diff": "@@ -133,6 +133,10 @@ public class TemplateOuterProduct extends TemplateBase {\nprivate void rConstructCplan(Hop hop, CPlanMemoTable memo, HashMap<Long, CNode> tmp, HashSet<Hop> inHops, HashMap<String, Hop> inHops2, boolean compileLiterals)\n{\n+ //memoization for common subexpression elimination and to avoid redundant work\n+ if( tmp.containsKey(hop.getHopID()) )\n+ return;\n+\n//recursively process required childs\nMemoTableEntry me = memo.getBest(hop.getHopID(), TemplateType.OuterProdTpl);\nfor( int i=0; i<hop.getInput().size(); i++ ) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRowAgg.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRowAgg.java",
"diff": "@@ -135,6 +135,10 @@ public class TemplateRowAgg extends TemplateBase\nprivate void rConstructCplan(Hop hop, CPlanMemoTable memo, HashMap<Long, CNode> tmp, HashSet<Hop> inHops, HashMap<String, Hop> inHops2, boolean compileLiterals)\n{\n+ //memoization for common subexpression elimination and to avoid redundant work\n+ if( tmp.containsKey(hop.getHopID()) )\n+ return;\n+\n//recursively process required childs\nMemoTableEntry me = memo.getBest(hop.getHopID(), TemplateType.RowAggTpl);\nfor( int i=0; i<hop.getInput().size(); i++ ) {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1458] Fix memoization and cse on codegen cplan construction |
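A hedged toy of the pattern this patch adds (the `Node`/`build` names are hypothetical, not SystemML classes): without the `tmp.containsKey(...)` short-circuit, a DAG node consumed by several parents is re-translated once per parent, defeating common subexpression elimination.

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MemoBuildDemo {
    static class Node {
        final long id; final List<Node> inputs;
        Node(long id, List<Node> inputs) { this.id = id; this.inputs = inputs; }
    }

    static final Map<Long, String> tmp = new HashMap<>();
    static int visits = 0;

    static String build(Node n) {
        // memoization for common subexpression elimination
        // and to avoid redundant work
        if (tmp.containsKey(n.id))
            return tmp.get(n.id);
        visits++;
        StringBuilder sb = new StringBuilder("op" + n.id);
        for (Node in : n.inputs)           // recursively process children
            sb.append('_').append(build(in));
        tmp.put(n.id, sb.toString());
        return sb.toString();
    }

    public static void main(String[] args) {
        Node shared = new Node(3, List.of());             // consumed twice
        Node root = new Node(1, List.of(shared, shared)); // diamond-shaped DAG
        build(root);
        System.out.println(visits);  // 2, not 3: the shared node is built once
    }
}
```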
49,738 | 03.04.2017 18:25:44 | 25,200 | 69d8b7c4b53deb3a1d3e4eba99b8718366df1a86 | Extended codegen operations and cost model | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeBinary.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeBinary.java",
"diff": "@@ -35,8 +35,8 @@ public class CNodeBinary extends CNode\nVECT_LESS_SCALAR, VECT_LESSEQUAL_SCALAR, VECT_GREATER_SCALAR, VECT_GREATEREQUAL_SCALAR,\nMULT, DIV, PLUS, MINUS, MODULUS, INTDIV,\nLESS, LESSEQUAL, GREATER, GREATEREQUAL, EQUAL,NOTEQUAL,\n- MIN, MAX, AND, OR, LOG, POW,\n- MINUS1_MULT;\n+ MIN, MAX, AND, OR, LOG, LOG_NZ, POW,\n+ MINUS1_MULT, MINUS_NZ;\npublic static boolean contains(String value) {\nfor( BinType bt : values() )\n@@ -116,10 +116,14 @@ public class CNodeBinary extends CNode\nreturn \" double %TMP% = (%IN1% >= %IN2%) ? %IN1% : %IN2%;\\n\";\ncase LOG:\nreturn \" double %TMP% = FastMath.log(%IN1%)/FastMath.log(%IN2%);\\n\";\n+ case LOG_NZ:\n+ return \" double %TMP% = (%IN1% == 0) ? 0 : FastMath.log(%IN1%)/FastMath.log(%IN2%);\\n\";\ncase POW:\nreturn \" double %TMP% = Math.pow(%IN1%, %IN2%);\\n\";\ncase MINUS1_MULT:\nreturn \" double %TMP% = 1 - %IN1% * %IN2%;\\n\";\n+ case MINUS_NZ:\n+ return \" double %TMP% = (%IN1% != 0) ? %IN1% - %IN2% : 0;\\n\";\ndefault:\nthrow new RuntimeException(\"Invalid binary type: \"+this.toString());\n@@ -225,6 +229,7 @@ public class CNodeBinary extends CNode\ncase DIV: return \"b(/)\";\ncase PLUS: return \"b(+)\";\ncase MINUS: return \"b(-)\";\n+ case POW: return \"b(^)\";\ncase MODULUS: return \"b(%%)\";\ncase INTDIV: return \"b(%/%)\";\ncase LESS: return \"b(<)\";\n@@ -233,8 +238,11 @@ public class CNodeBinary extends CNode\ncase GREATEREQUAL: return \"b(>=)\";\ncase EQUAL: return \"b(==)\";\ncase NOTEQUAL: return \"b(!=)\";\n+ case OR: return \"b(|)\";\n+ case AND: return \"b(&)\";\ncase MINUS1_MULT: return \"b(1-*)\";\n- default: return \"b(\"+_type.name()+\")\";\n+ case MINUS_NZ: return \"b(-nz)\";\n+ default: return \"b(\"+_type.name().toLowerCase()+\")\";\n}\n}\n@@ -278,6 +286,7 @@ public class CNodeBinary extends CNode\ncase PLUS:\ncase MINUS:\ncase MINUS1_MULT:\n+ case MINUS_NZ:\ncase MODULUS:\ncase INTDIV:\n//SCALAR Comparison\n@@ -293,6 +302,7 @@ public class CNodeBinary extends CNode\ncase AND:\ncase OR:\ncase LOG:\n+ case LOG_NZ:\ncase POW:\n_rows = 0;\n_cols = 0;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java",
"diff": "@@ -28,10 +28,10 @@ public class CNodeUnary extends CNode\n{\npublic enum UnaryType {\nROW_SUMS, LOOKUP_R, LOOKUP_RC, LOOKUP0, //codegen specific\n- EXP, POW2, MULT2, SQRT, LOG,\n+ EXP, POW2, MULT2, SQRT, LOG, LOG_NZ,\nABS, ROUND, CEIL, FLOOR, SIGN,\nSIN, COS, TAN, ASIN, ACOS, ATAN,\n- SELP, SPROP, SIGMOID, LOG_NZ;\n+ SELP, SPROP, SIGMOID;\npublic static boolean contains(String value) {\nfor( UnaryType ut : values() )\n@@ -156,7 +156,7 @@ public class CNodeUnary extends CNode\ncase LOOKUP_R: return \"u(ixr)\";\ncase LOOKUP_RC: return \"u(ixrc)\";\ncase LOOKUP0: return \"u(ix0)\";\n- default: return \"u(\"+_type.name()+\")\";\n+ default: return \"u(\"+_type.name().toLowerCase()+\")\";\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseCostBased.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseCostBased.java",
"diff": "@@ -457,7 +457,9 @@ public class PlanSelectionFuseCostBased extends PlanSelection\ncase SPROP:\ncase SQRT: costs = 2; break;\ncase EXP: costs = 18; break;\n- case LOG: costs = 32; break;\n+ case SIGMOID: costs = 21; break;\n+ case LOG:\n+ case LOG_NZ: costs = 32; break;\ncase NCOL:\ncase NROW:\ncase PRINT:\n@@ -466,6 +468,12 @@ public class PlanSelectionFuseCostBased extends PlanSelection\ncase CAST_AS_INT:\ncase CAST_AS_MATRIX:\ncase CAST_AS_SCALAR: costs = 1; break;\n+ case SIN: costs = 18; break;\n+ case COS: costs = 22; break;\n+ case TAN: costs = 42; break;\n+ case ASIN: costs = 93; break;\n+ case ACOS: costs = 103; break;\n+ case ATAN: costs = 40; break;\ncase CUMSUM:\ncase CUMMIN:\ncase CUMMAX:\n@@ -480,6 +488,10 @@ public class PlanSelectionFuseCostBased extends PlanSelection\ncase MULT:\ncase PLUS:\ncase MINUS:\n+ case MIN:\n+ case MAX:\n+ case AND:\n+ case OR:\ncase EQUAL:\ncase NOTEQUAL:\ncase LESS:\n@@ -488,10 +500,15 @@ public class PlanSelectionFuseCostBased extends PlanSelection\ncase GREATEREQUAL:\ncase CBIND:\ncase RBIND: costs = 1; break;\n+ case INTDIV: costs = 6; break;\n+ case MODULUS: costs = 8; break;\ncase DIV: costs = 22; break;\n- case LOG: costs = 32; break;\n+ case LOG:\n+ case LOG_NZ: costs = 32; break;\ncase POW: costs = (HopRewriteUtils.isLiteralOfValue(\ncurrent.getInput().get(1), 2) ? 1 : 16); break;\n+ case MINUS_NZ:\n+ case MINUS1_MULT: costs = 2; break;\ndefault:\nthrow new RuntimeException(\"Cost model not \"\n+ \"implemented yet for: \"+((BinaryOp)current).getOp());\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1424] Extended codegen operations and cost model |
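For intuition, the sketch below combines a few of the weights added above (EXP=18, DIV=22, MULT=1, copied from the patch) into a toy plan cost of weight × processed cells, summed over operators; the actual SystemML cost function in `PlanSelectionFuseCostBased` is more involved, so treat this as an illustrative assumption.

```java
import java.util.Map;

public class OpCostDemo {
    // Per-operator compute weights, a subset copied from the patch.
    static final Map<String, Integer> W = Map.of("exp", 18, "div", 22, "mult", 1);

    // Toy cost: each operator touches every cell once.
    static double cost(String[] ops, long cells) {
        double c = 0;
        for (String op : ops)
            c += W.get(op) * (double) cells;
        return c;
    }

    public static void main(String[] args) {
        // sigmoid-like pattern exp(X) / (1 + exp(X)) on a 1000x1000 matrix:
        // fusing keeps one pass over the data, but the compute cost remains
        // dominated by the expensive exp and div operators.
        System.out.println(cost(new String[]{"exp", "div"}, 1_000_000L)); // 4.0E7
    }
}
```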
49,772 | 04.04.2017 14:34:46 | 25,200 | abbce2bc7e2a281b27aff15e2cc523b41370ad00 | Add `epochs` parameter to `mnist_lenet::train(...)` function.
This commit extracts the `epochs` variable as a parameter of the
`mnist_lenet::train(...)` function.
Closes #450. | [
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/examples/mnist_lenet-train.dml",
"new_path": "scripts/staging/SystemML-NN/examples/mnist_lenet-train.dml",
"diff": "# - C: Number of color chanels in the images.\n# - Hin: Input image height.\n# - Win: Input image width.\n-# - out_dir: Directory to store weights and bias matrices of\n-# trained model, as well as final test accuracy.\n+# - epochs: [DEFAULT: 10] Total number of full training loops over\n+# the full data set.\n+# - out_dir: [DEFAULT: \".\"] Directory to store weights and bias\n+# matrices of trained model, as well as final test accuracy.\n# - fmt: [DEFAULT: \"csv\"] File format of `train` and `test` data.\n# Options include: \"csv\", \"mm\", \"text\", and \"binary\".\n#\n# --conf spark.driver.maxResultSize=0 --conf spark.akka.frameSize=128\n# $SYSTEMML_HOME/target/SystemML.jar -f mnist_lenet-train.dml\n# -nvargs train=data/mnist/mnist_train.csv test=data/mnist/mnist_test.csv\n-# C=1 Hin=28 Win=28 out_dir=model/mnist_lenet\n+# C=1 Hin=28 Win=28 epochs=10 out_dir=model/mnist_lenet\n# ```\n#\nsource(\"mnist_lenet.dml\") as mnist_lenet\n-# Read training data\n+# Read training data & settings\nfmt = ifdef($fmt, \"csv\")\ntrain = read($train, format=fmt)\ntest = read($test, format=fmt)\nC = $C\nHin = $Hin\nWin = $Win\n+epochs = ifdef($epochs, 10)\n+out_dir = ifdef($out_dir, \".\")\n# Extract images and labels\nimages = train[,2:ncol(train)]\n@@ -94,17 +98,17 @@ y = labels[5001:nrow(images),]\ny_val = labels[1:5000,]\n# Train\n-[W1, b1, W2, b2, W3, b3, W4, b4] = mnist_lenet::train(X, y, X_val, y_val, C, Hin, Win)\n+[W1, b1, W2, b2, W3, b3, W4, b4] = mnist_lenet::train(X, y, X_val, y_val, C, Hin, Win, epochs)\n# Write model out\n-write(W1, $out_dir+\"/W1\")\n-write(b1, $out_dir+\"/b1\")\n-write(W2, $out_dir+\"/W2\")\n-write(b2, $out_dir+\"/b2\")\n-write(W3, $out_dir+\"/W3\")\n-write(b3, $out_dir+\"/b3\")\n-write(W4, $out_dir+\"/W4\")\n-write(b4, $out_dir+\"/b4\")\n+write(W1, out_dir+\"/W1\")\n+write(b1, out_dir+\"/b1\")\n+write(W2, out_dir+\"/W2\")\n+write(b2, out_dir+\"/b2\")\n+write(W3, out_dir+\"/W3\")\n+write(b3, out_dir+\"/b3\")\n+write(W4, out_dir+\"/W4\")\n+write(b4, out_dir+\"/b4\")\n# Eval on test set\nprobs = mnist_lenet::predict(X_test, C, Hin, Win, W1, b1, W2, b2, W3, b3, W4, b4)\n@@ -112,7 +116,7 @@ probs = mnist_lenet::predict(X_test, C, Hin, Win, W1, b1, W2, b2, W3, b3, W4, b4\n# Output results\nprint(\"Test Accuracy: \" + accuracy)\n-write(accuracy, $out_dir+\"/accuracy\")\n+write(accuracy, out_dir+\"/accuracy\")\nprint(\"\")\nprint(\"\")\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/examples/mnist_lenet.dml",
"new_path": "scripts/staging/SystemML-NN/examples/mnist_lenet.dml",
"diff": "@@ -35,7 +35,7 @@ source(\"nn/optim/sgd_nesterov.dml\") as sgd_nesterov\ntrain = function(matrix[double] X, matrix[double] y,\nmatrix[double] X_val, matrix[double] y_val,\n- int C, int Hin, int Win)\n+ int C, int Hin, int Win, int epochs)\nreturn (matrix[double] W1, matrix[double] b1,\nmatrix[double] W2, matrix[double] b2,\nmatrix[double] W3, matrix[double] b3,\n@@ -55,6 +55,7 @@ train = function(matrix[double] X, matrix[double] y,\n* - C: Number of input channels (dimensionality of input depth).\n* - Hin: Input height.\n* - Win: Input width.\n+ * - epochs: Total number of full training loops over the full data set.\n*\n* Outputs:\n* - W1: 1st layer weights (parameters) matrix, of shape (F1, C*Hf*Wf).\n@@ -102,7 +103,6 @@ train = function(matrix[double] X, matrix[double] y,\n# Optimize\nprint(\"Starting optimization\")\nbatch_size = 64\n- epochs = 10\niters = ceil(N / batch_size)\nfor (e in 1:epochs) {\nfor(i in 1:iters) {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1460] Add `epochs` parameter to `mnist_lenet::train(...)` function.
This commit extracts the `epochs` variable as a parameter of the
`mnist_lenet::train(...)` function.
Closes #450. |
49,738 | 04.04.2017 18:33:05 | 25,200 | 1d1a9fa403a9227d1ef56b959132177b532884f6 | [HOTFIX][SYSTEMML-1459] Fix rewrite 'fuse binary subdag' (multi-matches) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java",
"new_path": "src/main/java/org/apache/sysml/hops/rewrite/RewriteAlgebraicSimplificationStatic.java",
"diff": "@@ -1028,6 +1028,7 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nBinaryOp bop = (BinaryOp)hi;\nHop left = hi.getInput().get(0);\nHop right = hi.getInput().get(1);\n+ boolean applied = false;\n//sample proportion (sprop) operator\nif( bop.getOp() == OpOp2.MULT && left.getDataType()==DataType.MATRIX && right.getDataType()==DataType.MATRIX )\n@@ -1051,11 +1052,12 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nHopRewriteUtils.replaceChildReference(parent, bop, unary, pos);\nHopRewriteUtils.cleanupUnreferenced(bop, left);\nhi = unary;\n+ applied = true;\nLOG.debug(\"Applied fuseBinarySubDAGToUnaryOperation-sprop1\");\n}\n}\n- if( right instanceof BinaryOp ) //X*(1-X)\n+ if( !applied && right instanceof BinaryOp ) //X*(1-X)\n{\nBinaryOp bright = (BinaryOp)right;\nHop right1 = bright.getInput().get(0);\n@@ -1069,13 +1071,15 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nHopRewriteUtils.replaceChildReference(parent, bop, unary, pos);\nHopRewriteUtils.cleanupUnreferenced(bop, left);\nhi = unary;\n+ applied = true;\nLOG.debug(\"Applied fuseBinarySubDAGToUnaryOperation-sprop2\");\n}\n}\n}\n+\n//sigmoid operator\n- else if( bop.getOp() == OpOp2.DIV && left.getDataType()==DataType.SCALAR && right.getDataType()==DataType.MATRIX\n+ if( !applied && bop.getOp() == OpOp2.DIV && left.getDataType()==DataType.SCALAR && right.getDataType()==DataType.MATRIX\n&& left instanceof LiteralOp && HopRewriteUtils.getDoubleValue((LiteralOp)left)==1 && right instanceof BinaryOp)\n{\n//note: if there are multiple consumers on the intermediate,\n@@ -1116,20 +1120,20 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nHopRewriteUtils.replaceChildReference(parent, bop, unary, pos);\nHopRewriteUtils.cleanupUnreferenced(bop, bop2, uop);\nhi = unary;\n+ applied = true;\nLOG.debug(\"Applied fuseBinarySubDAGToUnaryOperation-sigmoid1\");\n}\n}\n}\n}\n- //select positive (selp) operator\n- else if( bop.getOp() == OpOp2.MULT && left.getDataType()==DataType.MATRIX && right.getDataType()==DataType.MATRIX )\n+\n+ //select positive (selp) operator (note: same initial pattern as sprop)\n+ if( !applied && bop.getOp() == OpOp2.MULT && left.getDataType()==DataType.MATRIX && right.getDataType()==DataType.MATRIX )\n{\n//by definition, either left or right or none applies.\n//note: if there are multiple consumers on the intermediate tmp=(X>0), it's still beneficial\n//to replace the X*tmp with selp(X) due to lower memory requirements and simply sparsity propagation\n- boolean applied = false;\n-\nif( left instanceof BinaryOp ) //(X>0)*X\n{\nBinaryOp bleft = (BinaryOp)left;\n@@ -1143,7 +1147,6 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nUnaryOp unary = HopRewriteUtils.createUnary(right, OpOp1.SELP);\nHopRewriteUtils.replaceChildReference(parent, bop, unary, pos);\nHopRewriteUtils.cleanupUnreferenced(bop, left);\n-\nhi = unary;\napplied = true;\n@@ -1163,7 +1166,6 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\nUnaryOp unary = HopRewriteUtils.createUnary(left, OpOp1.SELP);\nHopRewriteUtils.replaceChildReference(parent, bop, unary, pos);\nHopRewriteUtils.cleanupUnreferenced(bop, left);\n-\nhi = unary;\napplied= true;\n@@ -1171,25 +1173,29 @@ public class RewriteAlgebraicSimplificationStatic extends HopRewriteRule\n}\n}\n}\n+\n//select positive (selp) operator; pattern: max(X,0) -> selp+\n- else if( bop.getOp() == OpOp2.MAX && left.getDataType()==DataType.MATRIX\n+ 
if( !applied && bop.getOp() == OpOp2.MAX && left.getDataType()==DataType.MATRIX\n&& right instanceof LiteralOp && HopRewriteUtils.getDoubleValue((LiteralOp)right)==0 )\n{\nUnaryOp unary = HopRewriteUtils.createUnary(left, OpOp1.SELP);\nHopRewriteUtils.replaceChildReference(parent, bop, unary, pos);\nHopRewriteUtils.cleanupUnreferenced(bop);\nhi = unary;\n+ applied = true;\nLOG.debug(\"Applied fuseBinarySubDAGToUnaryOperation-selp3\");\n}\n+\n//select positive (selp) operator; pattern: max(0,X) -> selp+\n- else if( bop.getOp() == OpOp2.MAX && right.getDataType()==DataType.MATRIX\n+ if( !applied && bop.getOp() == OpOp2.MAX && right.getDataType()==DataType.MATRIX\n&& left instanceof LiteralOp && HopRewriteUtils.getDoubleValue((LiteralOp)left)==0 )\n{\nUnaryOp unary = HopRewriteUtils.createUnary(right, OpOp1.SELP);\nHopRewriteUtils.replaceChildReference(parent, bop, unary, pos);\nHopRewriteUtils.cleanupUnreferenced(bop);\nhi = unary;\n+ applied = true;\nLOG.debug(\"Applied fuseBinarySubDAGToUnaryOperation-selp4\");\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX][SYSTEMML-1459] Fix rewrite 'fuse binary subdag' (multi-matches) |
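A runnable toy of the control-flow bug behind this hotfix (names are hypothetical): the sprop and selp rewrites share the same outer pattern, an element-wise MULT of two matrices, so an if/else-if chain never reaches the selp block when the sprop block's outer test matches but its inner pattern does not. Independent ifs guarded by an `applied` flag give every pattern a chance while still applying at most one rewrite.

```java
public class FuseGuardDemo {
    // Before the fix: else-if makes the second block dead code,
    // because both blocks test the same outer condition.
    static String buggy(boolean isMultMM, boolean isSprop, boolean isSelp) {
        String out = "none";
        if (isMultMM) {            // X*(1-X) -> sprop(X)
            if (isSprop) out = "sprop";
        }
        else if (isMultMM) {       // (X>0)*X -> selp(X): unreachable
            if (isSelp) out = "selp";
        }
        return out;
    }

    // After the fix: independent ifs guarded by 'applied'.
    static String fixed(boolean isMultMM, boolean isSprop, boolean isSelp) {
        String out = "none";
        boolean applied = false;
        if (isMultMM && isSprop) { out = "sprop"; applied = true; }
        if (!applied && isMultMM && isSelp) out = "selp";
        return out;
    }

    public static void main(String[] args) {
        System.out.println(buggy(true, false, true)); // none: selp rewrite missed
        System.out.println(fixed(true, false, true)); // selp
    }
}
```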
49,768 | 05.04.2017 15:56:10 | 25,200 | 16245f3b2001695fd596a20ddab5f4fb6c43dae3 | [maven-release-plugin] prepare release v0.14.0-incubating-rc2 | [
{
"change_type": "MODIFY",
"old_path": "pom.xml",
"new_path": "pom.xml",
"diff": "<version>18</version>\n</parent>\n<groupId>org.apache.systemml</groupId>\n- <version>0.14.0-incubating-SNAPSHOT</version>\n+ <version>0.14.0-incubating</version>\n<artifactId>systemml</artifactId>\n<packaging>jar</packaging>\n<name>SystemML</name>\n<connection>scm:git:git@github.com:apache/incubator-systemml</connection>\n<developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-systemml</developerConnection>\n<url>https://git-wip-us.apache.org/repos/asf?p=incubator-systemml.git</url>\n- <tag>HEAD</tag>\n+ <tag>v0.14.0-incubating-rc2</tag>\n</scm>\n<issueManagement>\n<system>JIRA</system>\n"
}
] | Java | Apache License 2.0 | apache/systemds | [maven-release-plugin] prepare release v0.14.0-incubating-rc2 |
49,772 | 05.04.2017 18:12:57 | 25,200 | bbca632a8617f5d76ed62778616e0ebcd3ccdcab | Updating breast cancer ML notebook & script
Updates to the machine learning notebook and convnet.dml model based on
recent changes. | [
{
"change_type": "MODIFY",
"old_path": "projects/breast_cancer/MachineLearning.ipynb",
"new_path": "projects/breast_cancer/MachineLearning.ipynb",
"diff": "\"size=256\\n\",\n\"grayscale = False\\n\",\n\"c = 1 if grayscale else 3\\n\",\n- \"p = 0.01\"\n+ \"p = 0.01\\n\",\n+ \"folder = \\\"data\\\"\"\n]\n},\n{\n\"outputs\": [],\n\"source\": [\n\"if p < 1:\\n\",\n- \" tr_sample_filename = os.path.join(\\\"data\\\", \\\"train_{}_sample_{}{}.parquet\\\".format(p, size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n- \" val_sample_filename = os.path.join(\\\"data\\\", \\\"val_{}_sample_{}{}.parquet\\\".format(p, size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n+ \" tr_filename = os.path.join(folder, \\\"train_{}_sample_{}{}.parquet\\\".format(p, size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n+ \" val_filename = os.path.join(folder, \\\"val_{}_sample_{}{}.parquet\\\".format(p, size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n\"else:\\n\",\n- \" tr_filename = \\\"train_{}{}.parquet\\\".format(size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n- \" val_filename = \\\"val_{}{}.parquet\\\".format(size, \\\"_grayscale\\\" if grayscale else \\\"\\\")\\n\",\n- \"train_df = sqlContext.read.load(tr_sample_filename)\\n\",\n- \"val_df = sqlContext.read.load(val_sample_filename)\\n\",\n+ \" tr_filename = os.path.join(folder, \\\"train_{}{}.parquet\\\".format(size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n+ \" val_filename = os.path.join(folder, \\\"val_{}{}.parquet\\\".format(size, \\\"_grayscale\\\" if grayscale else \\\"\\\"))\\n\",\n+ \"train_df = spark.read.load(tr_filename)\\n\",\n+ \"val_df = spark.read.load(val_filename)\\n\",\n\"train_df, val_df\"\n]\n},\n\"lr = 1e-2 # learning rate\\n\",\n\"mu = 0.9 # momentum\\n\",\n\"decay = 0.999 # learning rate decay constant\\n\",\n- \"batch_size = 50\\n\",\n+ \"batch_size = 32\\n\",\n\"epochs = 500\\n\",\n\"log_interval = 1\\n\",\n\"n = 200 # sample size for overfitting sanity check\\n\",\n\"lr = 5e-7 # learning rate\\n\",\n\"mu = 0.5 # momentum\\n\",\n\"decay = 0.999 # learning rate decay constant\\n\",\n- \"batch_size = 50\\n\",\n+ \"batch_size = 32\\n\",\n\"epochs = 1\\n\",\n\"log_interval = 10\\n\",\n\"\\n\",\n\"mu = 0.9 # momentum\\n\",\n\"decay = 0.999 # learning rate decay constant\\n\",\n\"lambda = 0 #5e-04\\n\",\n- \"batch_size = 50\\n\",\n+ \"batch_size = 32\\n\",\n\"epochs = 300\\n\",\n\"log_interval = 1\\n\",\n\"dir = \\\"models/lenet-cnn/sanity/\\\"\\n\",\n\" mu = as.scalar(rand(rows=1, cols=1, min=0.5, max=0.9)) # momentum\\n\",\n\" decay = as.scalar(rand(rows=1, cols=1, min=0.9, max=1)) # learning rate decay constant\\n\",\n\" lambda = 10 ^ as.scalar(rand(rows=1, cols=1, min=-7, max=-1)) # regularization constant\\n\",\n- \" batch_size = 50\\n\",\n+ \" batch_size = 32\\n\",\n\" epochs = 1\\n\",\n\" log_interval = 10\\n\",\n\" trial_dir = dir + \\\"j/\\\"\\n\",\n\"mu = 0.632 # momentum\\n\",\n\"decay = 0.99 # learning rate decay constant\\n\",\n\"lambda = 0.00385\\n\",\n- \"batch_size = 50\\n\",\n+ \"batch_size = 32\\n\",\n\"epochs = 1\\n\",\n\"log_interval = 10\\n\",\n\"dir = \\\"models/lenet-cnn/train/\\\"\\n\",\n\"\\n\",\n\"# Train\\n\",\n- \"[Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2] = clf::train(X, Y, X_val, Y_val, C, Hin, Win, lr, mu, decay, lambda, batch_size, epochs, log_interval, dir)\\n\",\n+ \"[Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2] =\\n\",\n+ \" clf::train(X, Y, X_val, Y_val, C, Hin, Win, lr, mu, decay,\\n\",\n+ \" lambda, batch_size, epochs, log_interval, dir)\\n\",\n\"\\\"\\\"\\\"\\n\",\n- \"outputs = (\\\"Wc1\\\", \\\"bc1\\\", \\\"Wc2\\\", \\\"bc2\\\", \\\"Wc3\\\", \\\"bc3\\\", 
\\\"Wa1\\\", \\\"ba1\\\", \\\"Wa2\\\", \\\"ba2\\\")\\n\",\n+ \"outputs = (\\\"Wc1\\\", \\\"bc1\\\", \\\"Wc2\\\", \\\"bc2\\\", \\\"Wc3\\\", \\\"bc3\\\",\\n\",\n+ \" \\\"Wa1\\\", \\\"ba1\\\", \\\"Wa2\\\", \\\"ba2\\\")\\n\",\n\"script = (dml(script).input(X=X, X_val=X_val, Y=Y, Y_val=Y_val,\\n\",\n\" C=c, Hin=size, Win=size)\\n\",\n\" .output(*outputs))\\n\",\n- \"Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2 = ml.execute(script).get(*outputs)\\n\",\n- \"Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2\"\n+ \"outs = ml.execute(script).get(*outputs)\\n\",\n+ \"Wc1, bc1, Wc2, bc2, Wc3, bc3, Wa1, ba1, Wa2, ba2 = outs\"\n]\n},\n{\n],\n\"metadata\": {\n\"kernelspec\": {\n- \"display_name\": \"Python 3\",\n+ \"display_name\": \"Python 3 + Spark 2.x + SystemML\",\n\"language\": \"python\",\n- \"name\": \"python3\"\n+ \"name\": \"pyspark3_2.x\"\n},\n\"language_info\": {\n\"codemirror_mode\": {\n"
},
{
"change_type": "MODIFY",
"old_path": "projects/breast_cancer/convnet.dml",
"new_path": "projects/breast_cancer/convnet.dml",
"diff": "@@ -149,19 +149,19 @@ train = function(matrix[double] X, matrix[double] Y,\nstride, stride, pad, pad)\noutc1r = relu::forward(outc1)\n[outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,\n- strideh=2, stridew=2)\n+ strideh=2, stridew=2, 0, 0)\n## conv layer 2: conv2 -> relu2 -> pool2\n[outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,\nstride, stride, pad, pad)\noutc2r = relu::forward(outc2)\n[outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,\n- strideh=2, stridew=2)\n+ strideh=2, stridew=2, 0, 0)\n## conv layer 3: conv3 -> relu3 -> pool3\n[outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,\nstride, stride, pad, pad)\noutc3r = relu::forward(outc3)\n[outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,\n- strideh=2, stridew=2)\n+ strideh=2, stridew=2, 0, 0)\n## affine layer 1: affine1 -> relu1 -> dropout1\nouta1 = affine::forward(outc3p, Wa1, ba1)\nouta1r = relu::forward(outa1)\n@@ -183,19 +183,19 @@ train = function(matrix[double] X, matrix[double] Y,\n[doutc3p, dWa1, dba1] = affine::backward(douta1, outc3p, Wa1, ba1)\n## conv layer 3: conv3 -> relu3 -> pool3\ndoutc3r = max_pool2d::backward(doutc3p, Houtc3p, Woutc3p, outc3r, F3, Houtc3, Woutc3,\n- Hf=2, Wf=2, strideh=2, stridew=2)\n+ Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)\ndoutc3 = relu::backward(doutc3r, outc3)\n[doutc2p, dWc3, dbc3] = conv2d::backward(doutc3, Houtc3, Woutc3, outc2p, Wc3, bc2, F2,\nHoutc2p, Woutc2p, Hf, Wf, stride, stride, pad, pad)\n## conv layer 2: conv2 -> relu2 -> pool2\ndoutc2r = max_pool2d::backward(doutc2p, Houtc2p, Woutc2p, outc2r, F2, Houtc2, Woutc2,\n- Hf=2, Wf=2, strideh=2, stridew=2)\n+ Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)\ndoutc2 = relu::backward(doutc2r, outc2)\n[doutc1p, dWc2, dbc2] = conv2d::backward(doutc2, Houtc2, Woutc2, outc1p, Wc2, bc2, F1,\nHoutc1p, Woutc1p, Hf, Wf, stride, stride, pad, pad)\n## conv layer 1: conv1 -> relu1 -> pool1\ndoutc1r = max_pool2d::backward(doutc1p, Houtc1p, Woutc1p, outc1r, F1, Houtc1, Woutc1,\n- Hf=2, Wf=2, strideh=2, stridew=2)\n+ Hf=2, Wf=2, strideh=2, stridew=2, 0, 0)\ndoutc1 = relu::backward(doutc1r, outc1)\n[dX_batch, dWc1, dbc1] = conv2d::backward(doutc1, Houtc1, Woutc1, X_batch, Wc1, bc1, C,\nHin, Win, Hf, Wf, stride, stride, pad, pad)\n@@ -382,19 +382,19 @@ predict = function(matrix[double] X, int C, int Hin, int Win,\n# pad, pad)\n#outc1r = relu::forward(outc1)\n#[outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,\n- # strideh=2, stridew=2)\n+ # strideh=2, stridew=2, 0, 0)\n### conv layer 2: conv2 -> relu2 -> pool2\n#[outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,\n# stride, stride, pad, pad)\n#outc2r = relu::forward(outc2)\n#[outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,\n- # strideh=2, stridew=2)\n+ # strideh=2, stridew=2, 0, 0)\n### conv layer 3: conv3 -> relu3 -> pool3\n#[outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,\n# stride, stride, pad, pad)\n#outc3r = relu::forward(outc3)\n#[outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,\n- # strideh=2, stridew=2)\n+ # strideh=2, stridew=2, 0, 0)\n### affine layer 1: affine1 -> relu1 -> dropout\n#outa1 = affine::forward(outc3p, Wa1, ba1)\n#outa1r = relu::forward(outa1)\n@@ -421,19 +421,19 @@ predict = function(matrix[double] X, 
int C, int Hin, int Win,\nstride, stride, pad, pad)\noutc1r = relu::forward(outc1)\n[outc1p, Houtc1p, Woutc1p] = max_pool2d::forward(outc1r, F1, Houtc1, Woutc1, Hf=2, Wf=2,\n- strideh=2, stridew=2)\n+ strideh=2, stridew=2, 0, 0)\n## conv layer 2: conv2 -> relu2 -> pool2\n[outc2, Houtc2, Woutc2] = conv2d::forward(outc1p, Wc2, bc2, F1, Houtc1p, Woutc1p, Hf, Wf,\nstride, stride, pad, pad)\noutc2r = relu::forward(outc2)\n[outc2p, Houtc2p, Woutc2p] = max_pool2d::forward(outc2r, F2, Houtc2, Woutc2, Hf=2, Wf=2,\n- strideh=2, stridew=2)\n+ strideh=2, stridew=2, 0, 0)\n## conv layer 3: conv3 -> relu3 -> pool3\n[outc3, Houtc3, Woutc3] = conv2d::forward(outc2p, Wc3, bc3, F2, Houtc2p, Woutc2p, Hf, Wf,\nstride, stride, pad, pad)\noutc3r = relu::forward(outc3)\n[outc3p, Houtc3p, Woutc3p] = max_pool2d::forward(outc3r, F3, Houtc3, Woutc3, Hf=2, Wf=2,\n- strideh=2, stridew=2)\n+ strideh=2, stridew=2, 0, 0)\n## affine layer 1: affine1 -> relu1 -> dropout\nouta1 = affine::forward(outc3p, Wa1, ba1)\nouta1r = relu::forward(outa1)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1185] Updating breast cancer ML notebook & script
Updates to the machine learning notebook and convnet.dml model based on
recent changes. |
49,772 | 05.04.2017 18:14:40 | 25,200 | 5412e2d7507c554a47f0112c4cc61570ae1b77d5 | Enable generation of sampled data in preprocessing
This enables the generation of a 1% sampled DataFrame during the
preprocessing phase of the breast cancer project. | [
{
"change_type": "MODIFY",
"old_path": "projects/breast_cancer/preprocess.py",
"new_path": "projects/breast_cancer/preprocess.py",
"diff": "@@ -100,49 +100,49 @@ save(val, val_df_path, sample_size, grayscale)\n# ---\n#\n-## Sample Data\n-### TODO: Wrap this in a function with appropriate default arguments\n-#\n-## Load train and validation DataFrames from disk.\n-#train = spark.read.load(train_df_path)\n-#val = spark.read.load(val_df_path)\n-#\n-## Take a stratified sample.\n-#p=0.01\n-#train_sample = train.drop(\"__INDEX\").sampleBy(\"tumor_score\", fractions={1: p, 2: p, 3: p}, seed=42)\n-#val_sample = val.drop(\"__INDEX\").sampleBy(\"tumor_score\", fractions={1: p, 2: p, 3: p}, seed=42)\n-#\n-## Reassign row indices.\n-## TODO: Wrap this in a function with appropriate default arguments.\n-#train_sample = (\n-# train_sample.rdd\n-# .zipWithIndex()\n-# .map(lambda r: (r[1] + 1, *r[0]))\n-# .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\n-#train_sample = train_sample.select(train_sample[\"__INDEX\"].astype(\"int\"),\n-# train_sample.slide_num.astype(\"int\"),\n-# train_sample.tumor_score.astype(\"int\"),\n-# train_sample.molecular_score,\n-# train_sample[\"sample\"])\n-#\n-#val_sample = (\n-# val_sample.rdd\n-# .zipWithIndex()\n-# .map(lambda r: (r[1] + 1, *r[0]))\n-# .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\n-#val_sample = val_sample.select(val_sample[\"__INDEX\"].astype(\"int\"),\n-# val_sample.slide_num.astype(\"int\"),\n-# val_sample.tumor_score.astype(\"int\"),\n-# val_sample.molecular_score,\n-# val_sample[\"sample\"])\n-#\n-## Save train and validation DataFrames.\n-#tr_sample_filename = \"train_{}_sample_{}{}.parquet\".format(p, sample_size,\n-# \"_grayscale\" if grayscale else \"\")\n-#val_sample_filename = \"val_{}_sample_{}{}.parquet\".format(p, sample_size,\n-# \"_grayscale\" if grayscale else \"\")\n-#train_sample_path = os.path.join(\"save_folder\", tr_sample_filename)\n-#val_sample_path = os.path.join(\"save_folder\", val_sample_filename)\n-#save(train_sample, train_sample_path, sample_size, grayscale)\n-#save(val_sample, val_sample_path, sample_size, grayscale)\n+# Sample Data\n+## TODO: Wrap this in a function with appropriate default arguments\n+\n+# Load train and validation DataFrames from disk.\n+train = spark.read.load(train_df_path)\n+val = spark.read.load(val_df_path)\n+\n+# Take a stratified sample.\n+p=0.01\n+train_sample = train.drop(\"__INDEX\").sampleBy(\"tumor_score\", fractions={1: p, 2: p, 3: p}, seed=42)\n+val_sample = val.drop(\"__INDEX\").sampleBy(\"tumor_score\", fractions={1: p, 2: p, 3: p}, seed=42)\n+\n+# Reassign row indices.\n+# TODO: Wrap this in a function with appropriate default arguments.\n+train_sample = (\n+ train_sample.rdd\n+ .zipWithIndex()\n+ .map(lambda r: (r[1] + 1, *r[0]))\n+ .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\n+train_sample = train_sample.select(train_sample[\"__INDEX\"].astype(\"int\"),\n+ train_sample.slide_num.astype(\"int\"),\n+ train_sample.tumor_score.astype(\"int\"),\n+ train_sample.molecular_score,\n+ train_sample[\"sample\"])\n+\n+val_sample = (\n+ val_sample.rdd\n+ .zipWithIndex()\n+ .map(lambda r: (r[1] + 1, *r[0]))\n+ .toDF(['__INDEX', 'slide_num', 'tumor_score', 'molecular_score', 'sample']))\n+val_sample = val_sample.select(val_sample[\"__INDEX\"].astype(\"int\"),\n+ val_sample.slide_num.astype(\"int\"),\n+ val_sample.tumor_score.astype(\"int\"),\n+ val_sample.molecular_score,\n+ val_sample[\"sample\"])\n+\n+# Save train and validation DataFrames.\n+tr_sample_filename = \"train_{}_sample_{}{}.parquet\".format(p, sample_size,\n+ \"_grayscale\" 
if grayscale else \"\")\n+val_sample_filename = \"val_{}_sample_{}{}.parquet\".format(p, sample_size,\n+ \"_grayscale\" if grayscale else \"\")\n+train_sample_path = os.path.join(save_folder, tr_sample_filename)\n+val_sample_path = os.path.join(save_folder, val_sample_filename)\n+save(train_sample, train_sample_path, sample_size, grayscale)\n+save(val_sample, val_sample_path, sample_size, grayscale)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1185] Enable generation of sampled data in preprocessing
This enables the generation of a 1% sampled DataFrame during the
preprocessing phase of the breast cancer project. |
49,738 | 06.04.2017 17:14:45 | 25,200 | f2ea6336fea757ba0fb81f315af9bb97df6615d3 | Improved codegen handling of empty inputs w/o alloc | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeTernary.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeTernary.java",
"diff": "@@ -54,7 +54,7 @@ public class CNodeTernary extends CNode\nreturn \" double %TMP% = Double.isNaN(%IN1%) ? %IN3% : %IN1%;\\n\";\ncase LOOKUP_RC1:\n- return \" double %TMP% = %IN1%[rowIndex*%IN2%+%IN3%-1];\\n\";\n+ return \" double %TMP% = getValue(%IN1%, rowIndex*%IN2%+%IN3%-1);\\n\";\ndefault:\nthrow new RuntimeException(\"Invalid ternary type: \"+this.toString());\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java",
"diff": "@@ -48,9 +48,9 @@ public class CNodeUnary extends CNode\ncase EXP:\nreturn \" double %TMP% = FastMath.exp(%IN1%);\\n\";\ncase LOOKUP_R:\n- return \" double %TMP% = %IN1%[rowIndex];\\n\";\n+ return \" double %TMP% = getValue(%IN1%, rowIndex);\\n\";\ncase LOOKUP_RC:\n- return \" double %TMP% = %IN1%[rowIndex*n+colIndex];\\n\";\n+ return \" double %TMP% = getValue(%IN1%, rowIndex*n+colIndex);\\n\";\ncase LOOKUP0:\nreturn \" double %TMP% = %IN1%[0];\\n\" ;\ncase POW2:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOperator.java",
"new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofOperator.java",
"diff": "@@ -70,8 +70,7 @@ public abstract class SpoofOperator implements Serializable\nfor(int i=offset; i<offset+len; i++) {\n//convert empty or sparse to dense temporary block (note: we don't do\n//this in place because this block might be used by multiple threads)\n- if( (inputs.get(i).isEmptyBlock(false) && !inputs.get(i).isAllocated())\n- || inputs.get(i).isInSparseFormat() ) {\n+ if( inputs.get(i).isInSparseFormat() && inputs.get(i).isAllocated() ) {\nMatrixBlock tmp = inputs.get(i);\nb[i-offset] = DataConverter.convertToDoubleVector(tmp);\nLOG.warn(getClass().getName()+\": Converted \"+tmp.getNumRows()+\"x\"+tmp.getNumColumns()+\n@@ -92,4 +91,10 @@ public abstract class SpoofOperator implements Serializable\nscalars[i] = scalarObjects.get(i).getDoubleValue();\nreturn scalars;\n}\n+\n+ //abstraction for safely accessing sideways matrices without the need\n+ //to allocate empty matrices as dense, see prepInputMatrices\n+ protected static double getValue(double[] data, int index) {\n+ return (data!=null) ? data[index] : 0;\n+ }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/MultiAggTmplTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/MultiAggTmplTest.java",
"diff": "@@ -59,12 +59,12 @@ public class MultiAggTmplTest extends AutomatedTestBase\n}\n@Test\n- public void testCodegenRowAgg1CP() {\n+ public void testCodegenMultiAgg1CP() {\ntestCodegenIntegration( TEST_NAME1, false, ExecType.CP );\n}\n@Test\n- public void testCodegenRowAgg1Spark() {\n+ public void testCodegenMultiAgg1Spark() {\ntestCodegenIntegration( TEST_NAME1, false, ExecType.SPARK );\n}\n@@ -74,12 +74,12 @@ public class MultiAggTmplTest extends AutomatedTestBase\n}\n@Test\n- public void testCodegenRowAgg2CP() {\n+ public void testCodegenMultiAgg2CP() {\ntestCodegenIntegration( TEST_NAME2, false, ExecType.CP );\n}\n@Test\n- public void testCodegenRowAgg2Spark() {\n+ public void testCodegenMultiAgg2Spark() {\ntestCodegenIntegration( TEST_NAME2, false, ExecType.SPARK );\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1472] Improved codegen handling of empty inputs w/o alloc |
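The core of the change is the null-safe `getValue` helper shown in the diff: empty side inputs no longer need to be materialized as dense arrays, and every generated lookup reads a null array as logical zeros. A minimal self-contained sketch of the same idea:

```java
public class GetValueDemo {
    // Mirrors the accessor added in SpoofOperator: a null data array stands
    // in for an empty (all-zeros) matrix block that was never allocated.
    static double getValue(double[] data, int index) {
        return (data != null) ? data[index] : 0;
    }

    public static void main(String[] args) {
        double[] dense = {1.5, 0.0, 3.0};
        double[] empty = null;                  // empty input, no allocation
        System.out.println(getValue(dense, 2)); // 3.0
        System.out.println(getValue(empty, 2)); // 0.0 instead of a crash
    }
}
```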
49,738 | 06.04.2017 18:51:45 | 25,200 | 7c15339019360f3add4aac23b06cde45a4b758f8 | Fix correctness codegen cellwise min/max aggregation
The cellwise codegen template allows for sum, sum_sq, min, and max as
aggregation functions. In the case of multi-threaded execution, there is a
need for a final aggregation of partial results. This patch fixes the
resulting correctness issues for min and max in such scenarios. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofCellwise.java",
"new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofCellwise.java",
"diff": "@@ -122,10 +122,10 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\nboolean sparseSafe = isSparseSafe() || (b.length == 0\n&& genexec( 0, b, scalars, m, n, 0, 0 ) == 0);\n- double sum = 0;\n+ double ret = 0;\nif( k <= 1 ) //SINGLE-THREADED\n{\n- sum = ( !inputs.get(0).isInSparseFormat() ) ?\n+ ret = ( !inputs.get(0).isInSparseFormat() ) ?\nexecuteDenseAndAgg(inputs.get(0).getDenseBlock(), b, scalars, m, n, sparseSafe, 0, m) :\nexecuteSparseAndAgg(inputs.get(0).getSparseBlock(), b, scalars, m, n, sparseSafe, 0, m);\n}\n@@ -143,11 +143,18 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\npool.shutdown();\n//aggregate partial results\n+ ValueFunction vfun = getAggFunction();\n+ if( vfun instanceof KahanFunction ) {\nKahanObject kbuff = new KahanObject(0, 0);\nKahanPlus kplus = KahanPlus.getKahanPlusFnObject();\nfor( Future<Double> task : taskret )\nkplus.execute2(kbuff, task.get());\n- sum = kbuff._sum;\n+ ret = kbuff._sum;\n+ }\n+ else {\n+ for( Future<Double> task : taskret )\n+ ret = vfun.execute(ret, task.get());\n+ }\n}\ncatch(Exception ex) {\nthrow new DMLRuntimeException(ex);\n@@ -157,9 +164,9 @@ public abstract class SpoofCellwise extends SpoofOperator implements Serializabl\n//correction for min/max\nif( (_aggOp == AggOp.MIN || _aggOp == AggOp.MAX) && sparseSafe\n&& inputs.get(0).getNonZeros()<inputs.get(0).getNumRows()*inputs.get(0).getNumColumns() )\n- sum = getAggFunction().execute(sum, 0); //unseen 0 might be max or min value\n+ ret = getAggFunction().execute(ret, 0); //unseen 0 might be max or min value\n- return new DoubleObject(sum);\n+ return new DoubleObject(ret);\n}\n@Override\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java",
"diff": "@@ -48,6 +48,8 @@ public class CellwiseTmplTest extends AutomatedTestBase\nprivate static final String TEST_NAME10 = TEST_NAME+10; //min/max(X + 7 * Y)\nprivate static final String TEST_NAME11 = TEST_NAME+11; //replace((0 / (X - 500))+1, 0/0, 7)\nprivate static final String TEST_NAME12 = TEST_NAME+12; //((X/3) %% 0.6) + ((X/3) %/% 0.6)\n+ private static final String TEST_NAME13 = TEST_NAME+13; //min(X + 7 * Y) large\n+\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + CellwiseTmplTest.class.getSimpleName() + \"/\";\n@@ -60,7 +62,7 @@ public class CellwiseTmplTest extends AutomatedTestBase\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- for( int i=1; i<=12; i++ ) {\n+ for( int i=1; i<=13; i++ ) {\naddTestConfiguration( TEST_NAME+i, new TestConfiguration(\nTEST_CLASS_DIR, TEST_NAME+i, new String[] {String.valueOf(i)}) );\n}\n@@ -127,6 +129,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME12, true, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenCellwiseRewrite13() {\n+ testCodegenIntegration( TEST_NAME13, true, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenCellwise1() {\ntestCodegenIntegration( TEST_NAME1, false, ExecType.CP );\n@@ -188,6 +195,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME12, false, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenCellwise13() {\n+ testCodegenIntegration( TEST_NAME13, false, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenCellwiseRewrite1_sp() {\ntestCodegenIntegration( TEST_NAME1, true, ExecType.SPARK );\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/cellwisetmpl13.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+X = matrix(seq(7, 1100006), 1100, 1000, byrow=TRUE);\n+Y = matrix(seq(6, 1100005), 1100, 1000, byrow=TRUE);\n+\n+Z = X + -7 * Y;\n+R1 = min(Z);\n+R = as.matrix(R1);\n+\n+writeMM(as(R,\"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/cellwisetmpl13.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(seq(7, 1100006), 1100, 1000);\n+Y = matrix(seq(6, 1100005), 1100, 1000);\n+\n+Z = X + -7 * Y;\n+R1 = min(Z);\n+R = as.matrix(R1);\n+\n+write(R, $1)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1473] Fix correctness codegen cellwise min/max aggregation
The cellwise codegen template allows for sum, sum_sq, min, and max as
aggregation functions. In the case of multi-threaded execution, there is a
need for a final aggregation of partial results. This patch fixes the
resulting correctness issues for min and max in such scenarios. |
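A compact sketch of the two corrections in the patch (demo names are hypothetical; the logic mirrors the diff): per-thread partial results must be merged with the aggregation function itself, not Kahan summation, and for sparse-safe plans an unseen zero cell can still be the true minimum or maximum and has to be folded in at the end.

```java
import java.util.function.DoubleBinaryOperator;

public class MinMaxAggDemo {
    public static void main(String[] args) {
        DoubleBinaryOperator agg = Math::min;   // the plan's aggregation function

        // Merge partial results from three worker threads with min, not sum.
        double[] partials = {4.0, 2.0, 9.0};
        double ret = partials[0];
        for (int i = 1; i < partials.length; i++)
            ret = agg.applyAsDouble(ret, partials[i]);

        // Correction for min/max: a sparse-safe plan never evaluates zero
        // cells, but if the input has any, 0 competes for the final result.
        long nnz = 5, cells = 8;
        if (nnz < cells)
            ret = agg.applyAsDouble(ret, 0);

        System.out.println(ret);                // 0.0, not 2.0
    }
}
```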
49,738 | 06.04.2017 20:57:07 | 25,200 | 9820f4c5293c69873f68544748507b6473948f12 | Extended code generator (multi-agg across partitions) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeMultiAgg.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeMultiAgg.java",
"diff": "@@ -106,7 +106,10 @@ public class CNodeMultiAgg extends CNodeTpl\nfor( int i=0; i<_outputs.size(); i++ ) {\nCNode out = _outputs.get(i);\nString tmpOut = getAggTemplate(i);\n- tmpOut = tmpOut.replace(\"%IN%\", out.getVarname());\n+ //get variable name (w/ handling of direct consumption of inputs)\n+ String varName = (out instanceof CNodeData && ((CNodeData)out).getHopID()==\n+ ((CNodeData)_inputs.get(0)).getHopID()) ? \"a\" : out.getVarname();\n+ tmpOut = tmpOut.replace(\"%IN%\", varName);\ntmpOut = tmpOut.replace(\"%IX%\", String.valueOf(i));\nsb.append(tmpOut);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseCostBased.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseCostBased.java",
"diff": "@@ -34,7 +34,9 @@ import org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.hops.AggBinaryOp;\nimport org.apache.sysml.hops.AggUnaryOp;\nimport org.apache.sysml.hops.BinaryOp;\n+import org.apache.sysml.hops.DataOp;\nimport org.apache.sysml.hops.Hop;\n+import org.apache.sysml.hops.Hop.AggOp;\nimport org.apache.sysml.hops.Hop.Direction;\nimport org.apache.sysml.hops.IndexingOp;\nimport org.apache.sysml.hops.ParameterizedBuiltinOp;\n@@ -82,13 +84,16 @@ public class PlanSelectionFuseCostBased extends PlanSelection\nif( LOG.isTraceEnabled() )\nLOG.trace(\"Partition materialization points: \"+Arrays.toString(M.toArray(new Long[0])));\n- //step 3: create composite templates entries\n+ //step 3: create composite templates (within the partition)\ncreateAndAddMultiAggPlans(memo, partition, R);\n//step 4: plan enumeration and plan selection\nselectPlans(memo, partition, R, M);\n}\n+ //step 5: add composite templates (across partitions)\n+ createAndAddMultiAggPlans(memo, roots);\n+\n//take all distinct best plans\nfor( Entry<Long, List<MemoTableEntry>> e : getBestPlans().entrySet() )\nmemo.setDistinct(e.getKey(), e.getValue());\n@@ -217,6 +222,7 @@ public class PlanSelectionFuseCostBased extends PlanSelection\n&& partition.contains(hop.getHopID());\n}\n+ //within-partition multi-agg templates\nprivate static void createAndAddMultiAggPlans(CPlanMemoTable memo, HashSet<Long> partition, HashSet<Long> R)\n{\n//create index of plans that reference full aggregates to avoid circular dependencies\n@@ -262,6 +268,30 @@ public class PlanSelectionFuseCostBased extends PlanSelection\n}\n}\n+ //across-partition multi-agg templates\n+ private static void createAndAddMultiAggPlans(CPlanMemoTable memo, ArrayList<Hop> roots)\n+ {\n+ //#1: collect full aggregations over shared inputs (otherwise never fused)\n+ HashMap<Long, ArrayList<Long>> fullAggs = new HashMap<Long, ArrayList<Long>>();\n+ Hop.resetVisitStatus(roots);\n+ for( Hop hop : roots )\n+ rCollectAggregatesSharedRead(hop, fullAggs);\n+\n+ //construct and add multiagg template plans (w/ max 3 aggregations)\n+ for( Entry<Long, ArrayList<Long>> e : fullAggs.entrySet() ) {\n+ if( e.getValue().size()<=1 )\n+ continue;\n+ ArrayList<Long> aggs = e.getValue();\n+ MemoTableEntry me = new MemoTableEntry(TemplateType.MultiAggTpl,\n+ aggs.get(0), aggs.get(1), (aggs.size()>2)?aggs.get(2):-1);\n+ for( int i=0; i<aggs.size(); i++ ) {\n+ memo.add(memo._hopRefs.get(aggs.get(i)), me);\n+ if( LOG.isTraceEnabled() )\n+ LOG.trace(\"Added multiagg* plan: \"+aggs.get(i)+\" \"+me);\n+ }\n+ }\n+ }\n+\nprivate static boolean isValidMultiAggregate(CPlanMemoTable memo, MemoTableEntry me) {\n//ensure that aggregates are independent of each other, i.e.,\n//they to not have potentially transitive parent child references\n@@ -285,6 +315,28 @@ public class PlanSelectionFuseCostBased extends PlanSelection\nreturn ret;\n}\n+ private static void rCollectAggregatesSharedRead(Hop current, HashMap<Long, ArrayList<Long>> aggs) {\n+ if( current.isVisited() )\n+ return;\n+\n+ //collect all applicable full aggregations per read\n+ if( HopRewriteUtils.isAggUnaryOp(current, AggOp.SUM, AggOp.SUM_SQ, AggOp.MIN, AggOp.MAX)\n+ && ((AggUnaryOp)current).getDirection()==Direction.RowCol\n+ && current.getInput().get(0) instanceof DataOp )\n+ {\n+ Hop input = current.getInput().get(0);\n+ if( !aggs.containsKey(input.getHopID()) )\n+ aggs.put(input.getHopID(), new ArrayList<Long>());\n+ aggs.get(input.getHopID()).add(current.getHopID());\n+ }\n+\n+ //recursively process children\n+ for( Hop 
c : current.getInput() )\n+ rCollectAggregatesSharedRead(c, aggs);\n+\n+ current.setVisited();\n+ }\n+\nprivate void selectPlans(CPlanMemoTable memo, HashSet<Long> partition, HashSet<Long> R, ArrayList<Long> M)\n{\n//if no materialization points, use basic fuse-all w/ partition awareness\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java",
"diff": "@@ -29,6 +29,7 @@ import java.util.stream.Collectors;\nimport org.apache.sysml.hops.AggBinaryOp;\nimport org.apache.sysml.hops.AggUnaryOp;\nimport org.apache.sysml.hops.BinaryOp;\n+import org.apache.sysml.hops.DataOp;\nimport org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.UnaryOp;\nimport org.apache.sysml.hops.Hop.AggOp;\n@@ -149,7 +150,7 @@ public class TemplateCell extends TemplateBase\nMemoTableEntry me = memo.getBest(hop.getHopID(), TemplateType.CellTpl);\nfor( int i=0; i<hop.getInput().size(); i++ ) {\nHop c = hop.getInput().get(i);\n- if( me.isPlanRef(i) )\n+ if( me!=null && me.isPlanRef(i) && !(c instanceof DataOp) )\nrConstructCplan(c, memo, tmp, inHops, compileLiterals);\nelse {\nCNodeData cdata = TemplateUtils.createCNodeData(c, compileLiterals);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1288] Extended code generator (multi-agg across partitions) |
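The key idea of the across-partition extension is visible in rCollectAggregatesSharedRead above: independent full aggregates (sum, sum_sq, min, max) that read the same input are grouped by that input's ID so they can be fused into a single multi-aggregate. The grouping itself reduces to a small multimap, sketched here with hypothetical numeric IDs in place of the real Hop graph:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SharedReadGrouping {
    public static void main(String[] args) {
        // (aggregateId, inputId) pairs, e.g., sum(X), min(X), max(X), sum(Y)
        long[][] aggToInput = {{10, 1}, {11, 1}, {12, 1}, {13, 2}};

        // Group aggregate IDs by the ID of their shared input.
        Map<Long, List<Long>> aggsByInput = new HashMap<>();
        for (long[] p : aggToInput)
            aggsByInput.computeIfAbsent(p[1], k -> new ArrayList<>()).add(p[0]);

        // Only inputs with at least two aggregates yield a fused
        // multi-aggregate template (capped at three, as in the patch).
        for (Map.Entry<Long, List<Long>> e : aggsByInput.entrySet())
            if (e.getValue().size() > 1) {
                List<Long> aggs = e.getValue().subList(0, Math.min(3, e.getValue().size()));
                System.out.println("multi-agg over input " + e.getKey() + ": " + aggs);
            }
    }
}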
49,738 | 07.04.2017 13:40:40 | 25,200 | de270219f7cf462c0d7c3471bcae1b17b81962b6 | Fix codegen cost model (number and size of inputs)
This patch fixes the cost-based codegen plan selection to account for
the correct input sizes and to disregard scalar inputs. Furthermore, it
adds more trace logging information and fixes remaining correctness
issues in multi-aggregates. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseCostBased.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseCostBased.java",
"diff": "@@ -244,7 +244,7 @@ public class PlanSelectionFuseCostBased extends PlanSelection\nfullAggs.add(hopID);\n}\nif( LOG.isTraceEnabled() ) {\n- LOG.trace(\"Found ua(RC) aggregations: \" +\n+ LOG.trace(\"Found within-partition ua(RC) aggregations: \" +\nArrays.toString(fullAggs.toArray(new Long[0])));\n}\n@@ -277,6 +277,12 @@ public class PlanSelectionFuseCostBased extends PlanSelection\nfor( Hop hop : roots )\nrCollectAggregatesSharedRead(hop, fullAggs);\n+ if( LOG.isTraceEnabled() ) {\n+ for( Entry<Long, ArrayList<Long>> e : fullAggs.entrySet() )\n+ LOG.trace(\"Found across-partition ua(RC) aggregations for \"+e.getKey()+\": \" +\n+ Arrays.toString(e.getValue().toArray(new Long[0])));\n+ }\n+\n//construct and add multiagg template plans (w/ max 3 aggregations)\nfor( Entry<Long, ArrayList<Long>> e : fullAggs.entrySet() ) {\nif( e.getValue().size()<=1 )\n@@ -531,8 +537,8 @@ public class PlanSelectionFuseCostBased extends PlanSelection\ncosts += rGetPlanCosts(memo, c, visited, partition, M, plan, computeCosts, costVect, best.type);\nelse { //include children and I/O costs\ncosts += rGetPlanCosts(memo, c, visited, partition, M, plan, computeCosts, null, null);\n- if( costVect != null )\n- costVect.addInputSize( c.getHopID(), Math.max(current.getDim1(),1)*Math.max(current.getDim2(),1));\n+ if( costVect != null && c.getDataType().isMatrix() )\n+ costVect.addInputSize( c.getHopID(), Math.max(c.getDim1(),1)*Math.max(c.getDim2(),1));\n}\n}\n@@ -711,6 +717,7 @@ public class PlanSelectionFuseCostBased extends PlanSelection\n@Override\npublic String toString() {\nreturn \"[\"+outSize+\", \"+computeCosts+\", {\"\n+ +Arrays.toString(inSizes.keySet().toArray(new Long[0]))+\", \"\n+Arrays.toString(inSizes.values().toArray(new Double[0]))+\"}]\";\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofMultiAggregate.java",
"new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofMultiAggregate.java",
"diff": "@@ -156,14 +156,18 @@ public abstract class SpoofMultiAggregate extends SpoofOperator implements Seria\nprivate void setInitialOutputValues(double[] c) {\n- for( int k=0; k<_aggOps.length; k++ ) {\n- switch(_aggOps[k]) {\n- case SUM:\n- case SUM_SQ: c[k] = 0; break;\n- case MIN: c[k] = Double.MAX_VALUE; break;\n- case MAX: c[k] = -Double.MAX_VALUE; break;\n+ for( int k=0; k<_aggOps.length; k++ )\n+ c[k] = getInitialValue(_aggOps[k]);\n}\n+\n+ public static double getInitialValue(AggOp aggop) {\n+ switch( aggop ) {\n+ case SUM:\n+ case SUM_SQ: return 0;\n+ case MIN: return Double.MAX_VALUE;\n+ case MAX: return -Double.MAX_VALUE;\n}\n+ return 0;\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1475] Fix codegen cost model (number and size of inputs)
This patch fixes the cost-based codegen plan selection to account for
the correct input sizes and to disregard scalar inputs. Furthermore, it
adds more trace logging information and fixes remaining correctness
issues in multi-aggregates. |
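The essential change is visible in the rGetPlanCosts hunk above: the size added to the cost vector must be derived from the child input c rather than from the current operator, and only matrix inputs count. A simplified, stand-alone sketch of such a cost vector (class and field names are illustrative, not the actual PlanSelectionFuseCostBased internals):

import java.util.HashMap;
import java.util.Map;

public class CostVectorSketch {
    // Simplified stand-in for a plan input: id, type, and dimensions.
    static class Input {
        long id; boolean isMatrix; long rows, cols;
        Input(long id, boolean isMatrix, long rows, long cols) {
            this.id = id; this.isMatrix = isMatrix; this.rows = rows; this.cols = cols;
        }
    }

    public static void main(String[] args) {
        Input[] inputs = {
            new Input(1, true, 1000, 10),  // matrix X
            new Input(1, true, 1000, 10),  // X again (shared read)
            new Input(2, false, 1, 1)      // scalar literal
        };

        // Keying by input id counts shared reads once; scalars are skipped.
        Map<Long, Double> inSizes = new HashMap<>();
        for (Input c : inputs)
            if (c.isMatrix)
                inSizes.put(c.id, (double) Math.max(c.rows, 1) * Math.max(c.cols, 1));

        double ioCost = inSizes.values().stream().mapToDouble(Double::doubleValue).sum();
        System.out.println(ioCost); // 10000.0: X counted once, scalar ignored
    }
}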
49,738 | 07.04.2017 19:45:41 | 25,200 | 18ab98a6e2083551618f4bf62dba3a19574b115b | Support rowMins/rowMaxs in codegen row aggregates
This patch extends the codegen compiler with rowMins and rowMaxs for
row aggregate templates and introduces the related runtime primitives.
Furthermore, it fixes various existing primitives such as sparse
rowSums and dense vector comparisons. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java",
"diff": "@@ -21,13 +21,15 @@ package org.apache.sysml.hops.codegen.cplan;\nimport java.util.Arrays;\n+import org.apache.commons.lang.StringUtils;\nimport org.apache.sysml.parser.Expression.DataType;\npublic class CNodeUnary extends CNode\n{\npublic enum UnaryType {\n- ROW_SUMS, LOOKUP_R, LOOKUP_RC, LOOKUP0, //codegen specific\n+ LOOKUP_R, LOOKUP_RC, LOOKUP0, //codegen specific\n+ ROW_SUMS, ROW_MINS, ROW_MAXS, //codegen specific\nEXP, POW2, MULT2, SQRT, LOG, LOG_NZ,\nABS, ROUND, CEIL, FLOOR, SIGN,\nSIN, COS, TAN, ASIN, ACOS, ATAN,\n@@ -43,8 +45,11 @@ public class CNodeUnary extends CNode\npublic String getTemplate(boolean sparse) {\nswitch( this ) {\ncase ROW_SUMS:\n- return sparse ? \" double %TMP% = LibSpoofPrimitives.vectSum(%IN1v%, %IN1i%, %POS1%, %LEN%);\\n\":\n- \" double %TMP% = LibSpoofPrimitives.vectSum(%IN1%, %POS1%, %LEN%);\\n\";\n+ case ROW_MINS:\n+ case ROW_MAXS:\n+ String vectName = StringUtils.capitalize(this.toString().substring(4,7).toLowerCase());\n+ return sparse ? \" double %TMP% = LibSpoofPrimitives.vect\"+vectName+\"(%IN1v%, %IN1i%, %POS1%, %LEN%);\\n\":\n+ \" double %TMP% = LibSpoofPrimitives.vect\"+vectName+\"(%IN1%, %POS1%, %LEN%);\\n\";\ncase EXP:\nreturn \" double %TMP% = FastMath.exp(%IN1%);\\n\";\ncase LOOKUP_R:\n@@ -153,6 +158,8 @@ public class CNodeUnary extends CNode\npublic String toString() {\nswitch(_type) {\ncase ROW_SUMS: return \"u(R+)\";\n+ case ROW_MINS: return \"u(Rmin)\";\n+ case ROW_MAXS: return \"u(Rmax)\";\ncase LOOKUP_R: return \"u(ixr)\";\ncase LOOKUP_RC: return \"u(ixrc)\";\ncase LOOKUP0: return \"u(ix0)\";\n@@ -165,6 +172,8 @@ public class CNodeUnary extends CNode\npublic void setOutputDims() {\nswitch(_type) {\ncase ROW_SUMS:\n+ case ROW_MINS:\n+ case ROW_MAXS:\ncase EXP:\ncase LOOKUP_R:\ncase LOOKUP_RC:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRowAgg.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRowAgg.java",
"diff": "@@ -55,6 +55,7 @@ import org.apache.sysml.runtime.matrix.data.Pair;\npublic class TemplateRowAgg extends TemplateBase\n{\n+ private static final Hop.AggOp[] SUPPORTED_ROW_AGG = new AggOp[]{AggOp.SUM, AggOp.MIN, AggOp.MAX};\nprivate static final Hop.OpOp2[] SUPPORTED_VECT_BINARY = new OpOp2[]{OpOp2.MULT, OpOp2.DIV,\nOpOp2.EQUAL, OpOp2.NOTEQUAL, OpOp2.LESS, OpOp2.LESSEQUAL, OpOp2.GREATER, OpOp2.GREATEREQUAL};\n@@ -157,11 +158,12 @@ public class TemplateRowAgg extends TemplateBase\nif(hop instanceof AggUnaryOp)\n{\nCNode cdata1 = tmp.get(hop.getInput().get(0).getHopID());\n- if( ((AggUnaryOp)hop).getDirection() == Direction.Row && ((AggUnaryOp)hop).getOp() == AggOp.SUM ) {\n+ if( ((AggUnaryOp)hop).getDirection() == Direction.Row && HopRewriteUtils.isAggUnaryOp(hop, SUPPORTED_ROW_AGG) ) {\nif(hop.getInput().get(0).getDim2()==1)\nout = (cdata1.getDataType()==DataType.SCALAR) ? cdata1 : new CNodeUnary(cdata1,UnaryType.LOOKUP_R);\nelse {\n- out = new CNodeUnary(cdata1, UnaryType.ROW_SUMS);\n+ String opcode = \"ROW_\"+((AggUnaryOp)hop).getOp().name().toUpperCase()+\"S\";\n+ out = new CNodeUnary(cdata1, UnaryType.valueOf(opcode));\ninHops2.put(\"X\", hop.getInput().get(0));\n}\n}\n@@ -284,6 +286,10 @@ public class TemplateRowAgg extends TemplateBase\nTernaryType.LOOKUP_RC1);\n}\n+ if( out == null ) {\n+ throw new RuntimeException(hop.getHopID()+\" \"+hop.getOpString());\n+ }\n+\nif( out.getDataType().isMatrix() ) {\nout.setNumRows(hop.getDim1());\nout.setNumCols(hop.getDim2());\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/codegen/LibSpoofPrimitives.java",
"new_path": "src/main/java/org/apache/sysml/runtime/codegen/LibSpoofPrimitives.java",
"diff": "@@ -74,7 +74,7 @@ public class LibSpoofPrimitives\nreturn c;\n}\n- // custom vector sums\n+ // custom vector sums, mins, maxs\n/**\n* Computes c = sum(A), where A is a dense vectors.\n@@ -89,14 +89,14 @@ public class LibSpoofPrimitives\nfinal int bn = len%8;\n//compute rest\n- for( int i = 0; i < bn; i++, ai++ )\n- val += a[ ai ];\n+ for( int i = ai; i < ai+bn; i++ )\n+ val += a[ i ];\n//unrolled 8-block (for better instruction-level parallelism)\n- for( int i = bn; i < len; i+=8, ai+=8 ) {\n+ for( int i = ai+bn; i < ai+len; i+=8 ) {\n//read 64B cacheline of a, compute cval' = sum(a) + cval\n- val += a[ ai+0 ] + a[ ai+1 ] + a[ ai+2 ] + a[ ai+3 ]\n- + a[ ai+4 ] + a[ ai+5 ] + a[ ai+6 ] + a[ ai+7 ];\n+ val += a[ i+0 ] + a[ i+1 ] + a[ i+2 ] + a[ i+3 ]\n+ + a[ i+4 ] + a[ i+5 ] + a[ i+6 ] + a[ i+7 ];\n}\n//scalar result\n@@ -118,23 +118,51 @@ public class LibSpoofPrimitives\n//compute rest\nfor( int i = ai; i < ai+bn; i++ )\n- val += avals[ ai+aix[i] ];\n+ val += avals[ aix[i] ];\n//unrolled 8-block (for better instruction-level parallelism)\nfor( int i = ai+bn; i < ai+len; i+=8 )\n{\n//read 64B of a via 'gather'\n//compute cval' = sum(a) + cval\n- val += avals[ ai+aix[i+0] ] + avals[ ai+aix[i+1] ]\n- + avals[ ai+aix[i+2] ] + avals[ ai+aix[i+3] ]\n- + avals[ ai+aix[i+4] ] + avals[ ai+aix[i+5] ]\n- + avals[ ai+aix[i+6] ] + avals[ ai+aix[i+7] ];\n+ val += avals[ aix[i+0] ] + avals[ aix[i+1] ]\n+ + avals[ aix[i+2] ] + avals[ aix[i+3] ]\n+ + avals[ aix[i+4] ] + avals[ aix[i+5] ]\n+ + avals[ aix[i+6] ] + avals[ aix[i+7] ];\n}\n//scalar result\nreturn val;\n}\n+ public static double vectMin(double[] a, int ai, int len) {\n+ double val = Double.MAX_VALUE;\n+ for( int i = ai; i < ai+len; i++ )\n+ val = Math.min(a[ai], val);\n+ return val;\n+ }\n+\n+ public static double vectMin(double[] avals, int[] aix, int ai, int len) {\n+ double val = Double.MAX_VALUE;\n+ for( int i = ai; i < ai+len; i++ )\n+ val = Math.min(avals[aix[i]], val);\n+ return val;\n+ }\n+\n+ public static double vectMax(double[] a, int ai, int len) {\n+ double val = -Double.MAX_VALUE;\n+ for( int i = ai; i < ai+len; i++ )\n+ val = Math.max(a[ai], val);\n+ return val;\n+ }\n+\n+ public static double vectMax(double[] avals, int[] aix, int ai, int len) {\n+ double val = -Double.MAX_VALUE;\n+ for( int i = ai; i < ai+len; i++ )\n+ val = Math.max(avals[aix[i]], val);\n+ return val;\n+ }\n+\n//custom vector div\npublic static void vectDivAdd(double[] a, double bval, double[] c, int ai, int ci, int len) {\n@@ -202,7 +230,7 @@ public class LibSpoofPrimitives\npublic static double[] vectNotequalWrite(double[] a, double bval, int ai, int len) {\ndouble[] c = allocVector(len, false);\nfor( int j = 0; j < len; j++, ai++)\n- c[j] = (a[j] != bval) ? 1 : 0;\n+ c[j] = (a[ai] != bval) ? 1 : 0;\nreturn c;\n}\n@@ -228,7 +256,7 @@ public class LibSpoofPrimitives\npublic static double[] vectLessWrite(double[] a, double bval, int ai, int len) {\ndouble[] c = allocVector(len, false);\nfor( int j = 0; j < len; j++, ai++)\n- c[j] = (a[j] < bval) ? 1 : 0;\n+ c[j] = (a[ai] < bval) ? 1 : 0;\nreturn c;\n}\n@@ -254,7 +282,7 @@ public class LibSpoofPrimitives\npublic static double[] vectLessequalWrite(double[] a, double bval, int ai, int len) {\ndouble[] c = allocVector(len, false);\nfor( int j = 0; j < len; j++, ai++)\n- c[j] = (a[j] <= bval) ? 1 : 0;\n+ c[j] = (a[ai] <= bval) ? 
1 : 0;\nreturn c;\n}\n@@ -280,7 +308,7 @@ public class LibSpoofPrimitives\npublic static double[] vectGreaterWrite(double[] a, double bval, int ai, int len) {\ndouble[] c = allocVector(len, false);\nfor( int j = 0; j < len; j++, ai++)\n- c[j] = (a[j] > bval) ? 1 : 0;\n+ c[j] = (a[ai] > bval) ? 1 : 0;\nreturn c;\n}\n@@ -306,7 +334,7 @@ public class LibSpoofPrimitives\npublic static double[] vectGreaterequalWrite(double[] a, double bval, int ai, int len) {\ndouble[] c = allocVector(len, false);\nfor( int j = 0; j < len; j++, ai++)\n- c[j] = (a[j] >= bval) ? 1 : 0;\n+ c[j] = (a[ai] >= bval) ? 1 : 0;\nreturn c;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/AlgorithmLinregCG.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/AlgorithmLinregCG.java",
"diff": "@@ -41,9 +41,7 @@ public class AlgorithmLinregCG extends AutomatedTestBase\nprivate final static String TEST_CONF = \"SystemML-config-codegen.xml\";\nprivate final static File TEST_CONF_FILE = new File(SCRIPT_DIR + TEST_DIR, TEST_CONF);\n- //TODO Investigate numerical stability issues: on certain platforms this test, occasionally fails,\n- //for 1e-5 (specifically testLinregCGSparseRewritesCP); apparently due to the -(-(X)) -> X rewrite.\n- private final static double eps = 1e-1;\n+ private final static double eps = 1e-5;\nprivate final static int rows = 2468;\nprivate final static int cols = 507;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowAggTmplTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowAggTmplTest.java",
"diff": "@@ -45,6 +45,7 @@ public class RowAggTmplTest extends AutomatedTestBase\nprivate static final String TEST_NAME7 = TEST_NAME+\"7\"; //t(X)%*%(X%*%v-y); sum((X%*%v-y)^2);\nprivate static final String TEST_NAME8 = TEST_NAME+\"8\"; //colSums((X/rowSums(X))>0.7)\nprivate static final String TEST_NAME9 = TEST_NAME+\"9\"; //t(X) %*% (v - abs(y))\n+ private static final String TEST_NAME10 = TEST_NAME+\"10\"; //Y=(X<=rowMins(X)); R=colSums((Y/rowSums(Y)));\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + RowAggTmplTest.class.getSimpleName() + \"/\";\n@@ -56,7 +57,7 @@ public class RowAggTmplTest extends AutomatedTestBase\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- for(int i=1; i<=9; i++)\n+ for(int i=1; i<=10; i++)\naddTestConfiguration( TEST_NAME+i, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME+i, new String[] { String.valueOf(i) }) );\n}\n@@ -105,6 +106,11 @@ public class RowAggTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME9, true, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenRowAggRewrite10() {\n+ testCodegenIntegration( TEST_NAME10, true, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenRowAgg1() {\ntestCodegenIntegration( TEST_NAME1, false, ExecType.CP );\n@@ -150,6 +156,11 @@ public class RowAggTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME9, false, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenRowAgg10() {\n+ testCodegenIntegration( TEST_NAME10, false, ExecType.CP );\n+ }\n+\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\nboolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/rowAggPattern10.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+library(\"matrixStats\")\n+\n+\n+X = matrix(seq(1,1500), 150, 10, byrow=TRUE);\n+\n+Y = (X <= rowMins(X));\n+Z = (Y / rowSums(Y));\n+R = t(colSums(Z));\n+\n+writeMM(as(R, \"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/rowAggPattern10.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(seq(1,1500), rows=150, cols=10);\n+\n+Y = (X <= rowMins(X));\n+Z = (Y / rowSums(Y));\n+R = colSums(Z);\n+\n+write(R, $1)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1448] Support rowMins/rowMaxs in codegen row aggregates
This patch extends the codegen compiler with rowMins and rowMaxs for
row aggregate templates and introduces the related runtime primitives.
Furthermore, it fixes various existing primitives such as sparse
rowSums and dense vector comparisons. |
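For intuition, each of the new row-aggregation kernels reduces one matrix row to a single value in a single pass. A simplified version of the min kernels for dense and sparse rows (not the exact LibSpoofPrimitives signatures; here the sparse variant reduces the stored values of parallel value/index arrays directly):

public class RowMinKernels {
    // Dense: min over a[ai..ai+len).
    static double vectMin(double[] a, int ai, int len) {
        double val = Double.MAX_VALUE;
        for (int i = ai; i < ai + len; i++)
            val = Math.min(a[i], val);
        return val;
    }

    // Sparse: parallel arrays of values and column indexes, with len
    // stored values. If the row also contains implicit zeros (len is
    // smaller than the number of columns), a full row min would
    // additionally need to consider 0.
    static double vectMinSparse(double[] avals, int[] aix, int ai, int len) {
        double val = Double.MAX_VALUE;
        for (int i = ai; i < ai + len; i++)
            val = Math.min(avals[i], val);
        return val;
    }

    public static void main(String[] args) {
        System.out.println(vectMin(new double[]{4, 2, 9}, 0, 3)); // 2.0
        System.out.println(vectMinSparse(new double[]{5, 3}, new int[]{0, 7}, 0, 2)); // 3.0
    }
}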
49,736 | 08.04.2017 19:28:28 | 28,800 | 8a5450dde5b55c6fb67d9fb034b69e5eafa15bf7 | Bugfix for scipy csr and numpy to matrixblock conversion
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemml/converters.py",
"new_path": "src/main/python/systemml/converters.py",
"diff": "@@ -80,7 +80,8 @@ def _convertDenseMatrixToMB(sc, src):\ndef _copyRowBlock(i, sc, ret, src, numRowsPerBlock, rlen, clen):\nrowIndex = int(i / numRowsPerBlock)\n- mb = _convertSPMatrixToMB(sc, src[i:i+numRowsPerBlock,]) if isinstance(src, spmatrix) else _convertDenseMatrixToMB(sc, src[i:i+numRowsPerBlock,])\n+ tmp = src[i:min(i+numRowsPerBlock, rlen),]\n+ mb = _convertSPMatrixToMB(sc, tmp) if isinstance(src, spmatrix) else _convertDenseMatrixToMB(sc, tmp)\nsc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.copyRowBlocks(mb, rowIndex, ret, numRowsPerBlock, rlen, clen)\nreturn i\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1474] Bugfix for scipy csr and numpy to matrixblock conversion
Closes #455. |
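The one-line fix above clamps each row-block slice to the matrix's total number of rows, so the last (possibly partial) block is sized correctly. The same boundary logic as a tiny Java sketch (variable names mirror the Python patch):

public class RowBlockBounds {
    public static void main(String[] args) {
        int rlen = 10, numRowsPerBlock = 4;
        for (int i = 0; i < rlen; i += numRowsPerBlock) {
            int end = Math.min(i + numRowsPerBlock, rlen); // clamp the last block
            System.out.println("block rows [" + i + ", " + end + ")");
        }
        // prints [0, 4), [4, 8), [8, 10); without the clamp, the last
        // block would be addressed as [8, 12), past the end of the matrix.
    }
}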
49,768 | 10.04.2017 16:05:23 | 25,200 | fb55a74d1441a1186df577f9a8674de34c6c64a2 | Add Pygments license in source distribution
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/assembly/source/LICENSE",
"new_path": "src/assembly/source/LICENSE",
"diff": "@@ -322,6 +322,36 @@ available at https://github.com/jquery/sizzle\nThe following license applies to all parts of this software except as\ndocumented below:\n+================================================================================\n+\n+Pygments (pygments-default.css) is distributed under the BSD license:\n+\n+Copyright (c) 2006-2017 by the respective authors (see AUTHORS file).\n+All rights reserved.\n+\n+Redistribution and use in source and binary forms, with or without\n+modification, are permitted provided that the following conditions are\n+met:\n+\n+* Redistributions of source code must retain the above copyright\n+ notice, this list of conditions and the following disclaimer.\n+\n+* Redistributions in binary form must reproduce the above copyright\n+ notice, this list of conditions and the following disclaimer in the\n+ documentation and/or other materials provided with the distribution.\n+\n+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n+\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+\n====\nPermission is hereby granted, free of charge, to any person obtaining\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1510] Add Pygments license in source distribution
Closes #456. |
49,772 | 10.04.2017 17:20:13 | 25,200 | f5ef628c0dbe4e5ce8dec61f5e05c5597e341c95 | Rename `batch_norm.dml` and `spatial_batch_norm.dml`
Rename `batch_norm.dml` and `spatial_batch_norm.dml` to
`batch_norm1d.dml` and `batch_norm2d.dml`.
Closes | [
{
"change_type": "RENAME",
"old_path": "scripts/staging/SystemML-NN/nn/layers/batch_norm.dml",
"new_path": "scripts/staging/SystemML-NN/nn/layers/batch_norm1d.dml",
"diff": "#-------------------------------------------------------------\n/*\n- * Batch Normalization layer.\n+ * 1D Batch Normalization layer.\n*/\nforward = function(matrix[double] X, matrix[double] gamma, matrix[double] beta,\n@@ -29,7 +29,8 @@ forward = function(matrix[double] X, matrix[double] gamma, matrix[double] beta,\nreturn (matrix[double] out, matrix[double] ema_mean_upd, matrix[double] ema_var_upd,\nmatrix[double] cache_mean, matrix[double] cache_var, matrix[double] cache_norm) {\n/*\n- * Computes the forward pass for a batch normalization layer.\n+ * Computes the forward pass for a 1D batch normalization layer.\n+ * The input data has N examples, each with D features.\n*\n* A batch normalization layer uses the per-feature sample mean and\n* per-feature uncorrected sample variance during training to\n@@ -116,7 +117,7 @@ backward = function(matrix[double] dout, matrix[double] out,\ndouble mu, double epsilon)\nreturn (matrix[double] dX, matrix[double] dgamma, matrix[double] dbeta) {\n/*\n- * Computes the backward pass for a batch normalization layer.\n+ * Computes the backward pass for a 1D batch normalization layer.\n*\n* Inputs:\n* - dout: Gradient wrt `out` from upstream, of shape (N, D).\n@@ -165,7 +166,7 @@ backward = function(matrix[double] dout, matrix[double] out,\nif (mode == 'train') {\n# Compute gradients during training\n- dgamma = colSums(norm*dout) # shape (1, D)\n+ dgamma = colSums(dout*norm) # shape (1, D)\ndbeta = colSums(dout) # shape (1, D)\ndnorm = dout * gamma # shape (N, D)\ndvar = (-1/2) * colSums(centered * (var+epsilon)^(-3/2) * dnorm) # shape (1, D)\n@@ -174,7 +175,7 @@ backward = function(matrix[double] dout, matrix[double] out,\n}\nelse {\n# Compute gradients during testing\n- dgamma = colSums(norm*dout) # shape (1, D)\n+ dgamma = colSums(dout*norm) # shape (1, D)\ndbeta = colSums(dout) # shape (1, D)\ndnorm = dout * gamma # shape (N, D)\ndX = dnorm / sqrt(var+epsilon) # shape (N, D)\n"
},
{
"change_type": "RENAME",
"old_path": "scripts/staging/SystemML-NN/nn/layers/spatial_batch_norm.dml",
"new_path": "scripts/staging/SystemML-NN/nn/layers/batch_norm2d.dml",
"diff": "#-------------------------------------------------------------\n/*\n- * Spatial Batch Normalization layer.\n+ * 2D (Spatial) Batch Normalization layer.\n*/\nsource(\"nn/util.dml\") as util\n@@ -31,7 +31,9 @@ forward = function(matrix[double] X, matrix[double] gamma, matrix[double] beta,\nreturn (matrix[double] out, matrix[double] ema_mean_upd, matrix[double] ema_var_upd,\nmatrix[double] cache_mean, matrix[double] cache_var, matrix[double] cache_norm) {\n/*\n- * Computes the forward pass for a spatial batch normalization layer.\n+ * Computes the forward pass for a 2D (spatial) batch normalization\n+ * layer. The input data has N examples, each represented as a 3D\n+ * volume unrolled into a single vector.\n*\n* A spatial batch normalization layer uses the per-channel sample\n* mean and per-channel uncorrected sample variance during training\n@@ -131,7 +133,8 @@ backward = function(matrix[double] dout, matrix[double] out,\ndouble mu, double epsilon)\nreturn (matrix[double] dX, matrix[double] dgamma, matrix[double] dbeta) {\n/*\n- * Computes the backward pass for a spatial batch normalization layer.\n+ * Computes the backward pass for a 2D (spatial) batch normalization\n+ * layer.\n*\n* Inputs:\n* - dout: Gradient wrt `out` from upstream, of shape (N, C*Hin*Win).\n@@ -184,7 +187,7 @@ backward = function(matrix[double] dout, matrix[double] out,\nif (mode == 'train') {\n# Compute gradients during training\n- dgamma = util::channel_sums(norm*dout, C, Hin, Win) # shape (C, 1)\n+ dgamma = util::channel_sums(dout*norm, C, Hin, Win) # shape (C, 1)\ndbeta = util::channel_sums(dout, C, Hin, Win) # shape (C, 1)\ndnorm = bias_multiply(dout, gamma) # shape (N, C*Hin*Win)\ndvar = util::channel_sums((-1/2) * bias_multiply(centered, (var+epsilon)^(-3/2)) * dnorm,\n@@ -200,7 +203,7 @@ backward = function(matrix[double] dout, matrix[double] out,\n}\nelse {\n# Compute gradients during testing\n- dgamma = util::channel_sums(norm*dout, C, Hin, Win) # shape (C, 1)\n+ dgamma = util::channel_sums(dout*norm, C, Hin, Win) # shape (C, 1)\ndbeta = util::channel_sums(dout, C, Hin, Win) # shape (C, 1)\ndnorm = bias_multiply(dout, gamma) # shape (N, C*Hin*Win)\ndX = bias_multiply(dnorm, 1/sqrt(var+epsilon)) # shape (N, C*Hin*Win)\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/nn/test/grad_check.dml",
"new_path": "scripts/staging/SystemML-NN/nn/test/grad_check.dml",
"diff": "* Gradient checks for various architectures.\n*/\nsource(\"nn/layers/affine.dml\") as affine\n-source(\"nn/layers/batch_norm.dml\") as batch_norm\n+source(\"nn/layers/batch_norm1d.dml\") as batch_norm1d\n+source(\"nn/layers/batch_norm2d.dml\") as batch_norm2d\nsource(\"nn/layers/conv2d.dml\") as conv2d\nsource(\"nn/layers/conv2d_builtin.dml\") as conv2d_builtin\nsource(\"nn/layers/cross_entropy_loss.dml\") as cross_entropy_loss\n@@ -40,7 +41,6 @@ source(\"nn/layers/relu.dml\") as relu\nsource(\"nn/layers/rnn.dml\") as rnn\nsource(\"nn/layers/sigmoid.dml\") as sigmoid\nsource(\"nn/layers/softmax.dml\") as softmax\n-source(\"nn/layers/spatial_batch_norm.dml\") as spatial_batch_norm\nsource(\"nn/layers/tanh.dml\") as tanh\nsource(\"nn/test/conv2d_simple.dml\") as conv2d_simple\nsource(\"nn/test/max_pool2d_simple.dml\") as max_pool2d_simple\n@@ -125,11 +125,11 @@ affine = function() {\n}\n}\n-batch_norm = function() {\n+batch_norm1d = function() {\n/*\n- * Gradient check for the batch normalization layer.\n+ * Gradient check for the 1D batch normalization layer.\n*/\n- print(\"Grad checking the batch normalization layer with L2 loss.\")\n+ print(\"Grad checking the 1D batch normalization layer with L2 loss.\")\n# Generate data\nN = 3 # num examples\n@@ -142,7 +142,7 @@ batch_norm = function() {\nbeta = rand(rows=1, cols=D)\nema_mean = rand(rows=1, cols=D)\nema_var = rand(rows=1, cols=D)\n- #[dummy, dummy, ema_mean, ema_var] = batch_norm::init(D)\n+ #[dummy, dummy, ema_mean, ema_var] = batch_norm1d::init(D)\n# Check training & testing modes\nfor (i in 1:2) {\n@@ -154,9 +154,9 @@ batch_norm = function() {\n# Compute analytical gradients of loss wrt parameters\n[out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\n+ batch_norm1d::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\ndout = l2_loss::backward(out, y)\n- [dX, dgamma, dbeta] = batch_norm::backward(dout, out, ema_mean_upd, ema_var_upd,\n+ [dX, dgamma, dbeta] = batch_norm1d::backward(dout, out, ema_mean_upd, ema_var_upd,\ncache_mean, cache_var, cache_norm,\nX, gamma, beta, mode, ema_mean, ema_var, mu, eps)\n@@ -169,11 +169,11 @@ batch_norm = function() {\nold = as.scalar(X[i,j])\nX[i,j] = old - h\n[outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\n+ batch_norm1d::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\nlossmh = l2_loss::forward(outmh, y)\nX[i,j] = old + h\n[outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\n+ batch_norm1d::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\nlossph = l2_loss::forward(outph, y)\nX[i,j] = old # reset\ndX_num = (lossph-lossmh) / (2*h) # numerical derivative\n@@ -190,11 +190,11 @@ batch_norm = function() {\nold = as.scalar(gamma[i,j])\ngamma[i,j] = old - h\n[outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\n+ batch_norm1d::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\nlossmh = l2_loss::forward(outmh, y)\ngamma[i,j] = old + h\n[outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\n+ batch_norm1d::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\nlossph = l2_loss::forward(outph, 
y)\ngamma[i,j] = old # reset\ndgamma_num = (lossph-lossmh) / (2*h) # numerical derivative\n@@ -212,11 +212,11 @@ batch_norm = function() {\nold = as.scalar(beta[i,j])\nbeta[i,j] = old - h\n[outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\n+ batch_norm1d::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\nlossmh = l2_loss::forward(outmh, y)\nbeta[i,j] = old + h\n[outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\n+ batch_norm1d::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\nlossph = l2_loss::forward(outph, y)\nbeta[i,j] = old # reset\ndbeta_num = (lossph-lossmh) / (2*h) # numerical derivative\n@@ -1276,11 +1276,11 @@ softmax = function() {\n}\n}\n-spatial_batch_norm = function() {\n+batch_norm2d = function() {\n/*\n- * Gradient check for the spatial batch normalization layer.\n+ * Gradient check for the 2D (spatial) batch normalization layer.\n*/\n- print(\"Grad checking the spatial batch normalization layer with L2 loss.\")\n+ print(\"Grad checking the 2D (spatial) batch normalization layer with L2 loss.\")\n# Generate data\nN = 3 # num examples\n@@ -1296,7 +1296,7 @@ spatial_batch_norm = function() {\nbeta = rand(rows=C, cols=1)\nema_mean = rand(rows=C, cols=1)\nema_var = rand(rows=C, cols=1)\n- #[dummy, dummy, ema_mean, ema_var] = spatial_batch_norm::init(C)\n+ #[dummy, dummy, ema_mean, ema_var] = batch_norm2d::init(C)\n# Check training & testing modes\nfor (i in 1:2) {\n@@ -1308,9 +1308,9 @@ spatial_batch_norm = function() {\n# Compute analytical gradients of loss wrt parameters\n[out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- spatial_batch_norm::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\ndout = l2_loss::backward(out, y)\n- [dX, dgamma, dbeta] = spatial_batch_norm::backward(dout, out, ema_mean_upd, ema_var_upd,\n+ [dX, dgamma, dbeta] = batch_norm2d::backward(dout, out, ema_mean_upd, ema_var_upd,\ncache_mean, cache_var, cache_norm,\nX, gamma, beta, C, Hin, Win, mode,\nema_mean, ema_var, mu, eps)\n@@ -1324,13 +1324,11 @@ spatial_batch_norm = function() {\nold = as.scalar(X[i,j])\nX[i,j] = old - h\n[outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- spatial_batch_norm::forward(X, gamma, beta, C, Hin, Win, mode,\n- ema_mean, ema_var, mu, eps)\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\nlossmh = l2_loss::forward(outmh, y)\nX[i,j] = old + h\n[outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- spatial_batch_norm::forward(X, gamma, beta, C, Hin, Win, mode,\n- ema_mean, ema_var, mu, eps)\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\nlossph = l2_loss::forward(outph, y)\nX[i,j] = old # reset\ndX_num = (lossph-lossmh) / (2*h) # numerical derivative\n@@ -1347,13 +1345,11 @@ spatial_batch_norm = function() {\nold = as.scalar(gamma[i,j])\ngamma[i,j] = old - h\n[outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- spatial_batch_norm::forward(X, gamma, beta, C, Hin, Win, mode,\n- ema_mean, ema_var, mu, eps)\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\nlossmh = l2_loss::forward(outmh, y)\ngamma[i,j] = old + h\n[outph, ema_mean_upd, ema_var_upd, cache_mean, 
cache_var, cache_norm] =\n- spatial_batch_norm::forward(X, gamma, beta, C, Hin, Win, mode,\n- ema_mean, ema_var, mu, eps)\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\nlossph = l2_loss::forward(outph, y)\ngamma[i,j] = old # reset\ndgamma_num = (lossph-lossmh) / (2*h) # numerical derivative\n@@ -1371,13 +1367,11 @@ spatial_batch_norm = function() {\nold = as.scalar(beta[i,j])\nbeta[i,j] = old - h\n[outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- spatial_batch_norm::forward(X, gamma, beta, C, Hin, Win, mode,\n- ema_mean, ema_var, mu, eps)\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\nlossmh = l2_loss::forward(outmh, y)\nbeta[i,j] = old + h\n[outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- spatial_batch_norm::forward(X, gamma, beta, C, Hin, Win, mode,\n- ema_mean, ema_var, mu, eps)\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\nlossph = l2_loss::forward(outph, y)\nbeta[i,j] = old # reset\ndbeta_num = (lossph-lossmh) / (2*h) # numerical derivative\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/nn/test/run_tests.dml",
"new_path": "scripts/staging/SystemML-NN/nn/test/run_tests.dml",
"diff": "@@ -37,7 +37,8 @@ tmp = grad_check::log_loss()\n# Other layers\ntmp = grad_check::affine()\n-tmp = grad_check::batch_norm()\n+tmp = grad_check::batch_norm1d()\n+tmp = grad_check::batch_norm2d()\ntmp = grad_check::conv2d_simple()\ntmp = grad_check::conv2d()\ntmp = grad_check::conv2d_builtin()\n@@ -52,7 +53,6 @@ tmp = grad_check::relu()\ntmp = grad_check::rnn()\ntmp = grad_check::sigmoid()\ntmp = grad_check::softmax()\n-tmp = grad_check::spatial_batch_norm()\ntmp = grad_check::tanh()\n# Example model\n@@ -69,13 +69,13 @@ print(\"\")\nprint(\"Starting other tests.\")\nprint(\"---\")\n-tmp = test::batch_norm()\n+tmp = test::batch_norm1d()\n+tmp = test::batch_norm2d()\ntmp = test::im2col()\ntmp = test::padding()\ntmp = test::conv2d()\ntmp = test::cross_entropy_loss()\ntmp = test::max_pool2d()\n-tmp = test::spatial_batch_norm()\ntmp = test::tanh()\nprint(\"---\")\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/nn/test/test.dml",
"new_path": "scripts/staging/SystemML-NN/nn/test/test.dml",
"diff": "/*\n* Various tests, not including gradient checks.\n*/\n-source(\"nn/layers/batch_norm.dml\") as batch_norm\n+source(\"nn/layers/batch_norm1d.dml\") as batch_norm1d\n+source(\"nn/layers/batch_norm2d.dml\") as batch_norm2d\nsource(\"nn/layers/conv2d.dml\") as conv2d\nsource(\"nn/layers/conv2d_builtin.dml\") as conv2d_builtin\nsource(\"nn/layers/cross_entropy_loss.dml\") as cross_entropy_loss\nsource(\"nn/layers/max_pool2d.dml\") as max_pool2d\nsource(\"nn/layers/max_pool2d_builtin.dml\") as max_pool2d_builtin\n-source(\"nn/layers/spatial_batch_norm.dml\") as spatial_batch_norm\nsource(\"nn/layers/tanh.dml\") as tanh\nsource(\"nn/test/conv2d_simple.dml\") as conv2d_simple\nsource(\"nn/test/max_pool2d_simple.dml\") as max_pool2d_simple\nsource(\"nn/test/util.dml\") as test_util\nsource(\"nn/util.dml\") as util\n-batch_norm = function() {\n+batch_norm1d = function() {\n/*\n- * Test for the batch normalization function.\n+ * Test for the 1D batch normalization function.\n*/\n- print(\"Testing the batch normalization function.\")\n+ print(\"Testing the 1D batch normalization function.\")\n# Generate data\nN = 4 # Number of examples\n@@ -50,11 +50,11 @@ batch_norm = function() {\nX = matrix(seq(1,16), rows=N, cols=D)\n# Create layer\n- [gamma, beta, ema_mean, ema_var] = batch_norm::init(D)\n+ [gamma, beta, ema_mean, ema_var] = batch_norm1d::init(D)\n# Forward\n[out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\n+ batch_norm1d::forward(X, gamma, beta, mode, ema_mean, ema_var, mu, eps)\n# Equivalency check\ntarget = matrix(\"-1.34160721 -1.34160721 -1.34160733 -1.34160709\n@@ -428,11 +428,11 @@ max_pool2d = function() {\ntmp = test_util::check_all_equal(out_builtin, target)\n}\n-spatial_batch_norm = function() {\n+batch_norm2d = function() {\n/*\n- * Test for the spatial batch normalization function.\n+ * Test for the 2D (spatial) batch normalization function.\n*/\n- print(\"Testing the spatial batch normalization function.\")\n+ print(\"Testing the 2D (spatial) batch normalization function.\")\n# Generate data\nN = 2 # Number of examples\n@@ -474,11 +474,11 @@ spatial_batch_norm = function() {\n55 58 52 0 99\", rows=N, cols=C*Hin*Win)\n# Create layer\n- [gamma, beta, ema_mean, ema_var] = spatial_batch_norm::init(C)\n+ [gamma, beta, ema_mean, ema_var] = batch_norm2d::init(C)\n# Forward\n[out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- spatial_batch_norm::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n# Equivalency check\ntarget = matrix(\"0.86215019 -0.76679718 -1.00517964 0.26619387 0.94161105\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1463] Rename `batch_norm.dml` and `spatial_batch_norm.dml`
Rename `batch_norm.dml` and `spatial_batch_norm.dml` to
`batch_norm1d.dml` and `batch_norm2d.dml`.
Closes #453. |
49,772 | 10.04.2017 17:20:55 | 25,200 | 6517256511b5953b4efea97600164261243a8402 | Add new 1D/2D "Scale & Shift" layers
A "Scale & Shift" layer introduces learnable parameters
(`gamma`, `beta`) to scale and shift the input on either
a per-feature basis (1D) or a per-channel basis (2D).
`y = x*gamma + beta`
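The same math, written out as a small Java sketch for the 1D case (N x D input with per-feature gamma and beta; forward and backward are fused into one loop purely for brevity, and the actual layers are the DML files added below):

import java.util.Arrays;

public class ScaleShift1DSketch {
    public static void main(String[] args) {
        double[][] X    = {{1, 2}, {3, 4}};   // N=2 examples, D=2 features
        double[] gamma  = {2, 0.5};
        double[] beta   = {1, -1};
        double[][] dout = {{1, 1}, {1, 1}};   // gradient from upstream

        int N = X.length, D = gamma.length;
        double[][] out = new double[N][D], dX = new double[N][D];
        double[] dgamma = new double[D], dbeta = new double[D];

        for (int i = 0; i < N; i++)
            for (int j = 0; j < D; j++) {
                out[i][j]  = X[i][j] * gamma[j] + beta[j]; // y = x*gamma + beta
                dgamma[j] += dout[i][j] * X[i][j];         // colSums(dout*X)
                dbeta[j]  += dout[i][j];                   // colSums(dout)
                dX[i][j]   = dout[i][j] * gamma[j];        // dX = dout*gamma
            }

        System.out.println(Arrays.deepToString(out)); // [[3.0, 0.0], [7.0, 1.0]]
        System.out.println(Arrays.toString(dgamma));  // [4.0, 6.0]
    }
}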
Closes | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/SystemML-NN/nn/layers/scale_shift1d.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+/*\n+ * 1D Scale & Shift layer.\n+ */\n+\n+forward = function(matrix[double] X, matrix[double] gamma, matrix[double] beta)\n+ return (matrix[double] out) {\n+ /*\n+ * Computes the forward pass for a 1D scale & shift layer. The input\n+ * data has N examples, each with D features.\n+ *\n+ * A 1D scale & shift layer introduces learnable parameters\n+ * (gamma, beta) to scale and shift the input on a per-feature basis.\n+ *\n+ * `y = x*gamma + beta`\n+ *\n+ * Inputs:\n+ * - X: Inputs, of shape (N, D).\n+ * - gamma: Scale parameters, of shape (1, D).\n+ * - beta: Shift parameters, of shape (1, D).\n+ *\n+ * Outputs:\n+ * - out: Outputs, of shape (N, D).\n+ */\n+ # Scale and shift\n+ out = X*gamma + beta # shape (N, D)\n+}\n+\n+backward = function(matrix[double] dout, matrix[double] out,\n+ matrix[double] X, matrix[double] gamma, matrix[double] beta)\n+ return (matrix[double] dX, matrix[double] dgamma, matrix[double] dbeta) {\n+ /*\n+ * Computes the backward pass for a 1D scale & shift layer.\n+ *\n+ * Inputs:\n+ * - dout: Gradient wrt `out` from upstream, of shape (N, D).\n+ * - out: Outputs from the forward pass, of shape (N, D).\n+ * - X: Inputs, of shape (N, D).\n+ * - gamma: Scale parameters, of shape (1, D).\n+ * - beta: Shift parameters, of shape (1, D).\n+ *\n+ * Outputs:\n+ * - dX: Gradient wrt `X`, of shape (N, D).\n+ * - dgamma: Gradient wrt `W`, of shape (1, D).\n+ * - dbeta: Gradient wrt `b`, of shape (1, D).\n+ *\n+ */\n+ # Compute gradients during training\n+ dgamma = colSums(dout*X) # shape (1, D)\n+ dbeta = colSums(dout) # shape (1, D)\n+ dX = dout * gamma # shape (N, D)\n+}\n+\n+init = function(int D)\n+ return (matrix[double] gamma, matrix[double] beta) {\n+ /*\n+ * Initialize the parameters of this layer.\n+ *\n+ * By default, we initialize to an identity function, with a scale\n+ * filler of `1`, and a shift filler of `0`.\n+ *\n+ * Note: This is just a convenience function, and parameters\n+ * may be initialized manually if needed.\n+ *\n+ * Inputs:\n+ * - D: Dimensionality of the input features (number of features).\n+ *\n+ * Outputs:\n+ * - gamma: Scale parameters, of shape (1, D).\n+ * - beta: Shift parameters, of shape (1, D).\n+ */\n+ gamma = matrix(1, rows=1, cols=D)\n+ beta = matrix(0, rows=1, cols=D)\n+}\n+\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/SystemML-NN/nn/layers/scale_shift2d.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+/*\n+ * 2D Scale & Shift layer.\n+ */\n+source(\"nn/util.dml\") as util\n+\n+forward = function(matrix[double] X, matrix[double] gamma, matrix[double] beta,\n+ int C, int Hin, int Win)\n+ return (matrix[double] out) {\n+ /*\n+ * Computes the forward pass for a 2D scale & shift layer. The input\n+ * data has N examples, each represented as a 3D volume unrolled into\n+ * a single vector.\n+ *\n+ * A 2D scale & shift layer introduces learnable parameters\n+ * (gamma, beta) to scale and shift the input on a per-channel basis.\n+ *\n+ * `y = x*gamma + beta`\n+ *\n+ * Inputs:\n+ * - X: Inputs, of shape (N, C*Hin*Win).\n+ * - gamma: Scale parameters, of shape (C, 1).\n+ * - beta: Shift parameters, of shape (C, 1).\n+ * - C: Number of input channels (dimensionality of input depth).\n+ * - Hin: Input height.\n+ * - Win: Input width.\n+ *\n+ * Outputs:\n+ * - out: Outputs, of shape (N, C*Hin*Win).\n+ */\n+ # Scale and shift\n+ scaled = bias_multiply(X, gamma) # shape (N, C*Hin*Win)\n+ out = bias_add(scaled, beta) # shape (N, C*Hin*Win)\n+}\n+\n+backward = function(matrix[double] dout, matrix[double] out,\n+ matrix[double] X, matrix[double] gamma, matrix[double] beta,\n+ int C, int Hin, int Win)\n+ return (matrix[double] dX, matrix[double] dgamma, matrix[double] dbeta) {\n+ /*\n+ * Computes the backward pass for a 2D scale & shift layer.\n+ *\n+ * Inputs:\n+ * - dout: Gradient wrt `out` from upstream, of shape (N, C*Hin*Win).\n+ * - out: Outputs from the forward pass, of shape (N, C*Hin*Win).\n+ * - X: Input data matrix to the forward pass, of\n+ * shape (N, C*Hin*Win).\n+ * - gamma: Scale parameters, of shape (C, 1).\n+ * - beta: Shift parameters, of shape (C, 1).\n+ * - C: Number of input channels (dimensionality of input depth).\n+ * - Hin: Input height.\n+ * - Win: Input width.\n+ *\n+ * Outputs:\n+ * - dX: Gradient wrt `X`, of shape (N, C*Hin*Win).\n+ * - dgamma: Gradient wrt `W`, of shape (C, 1).\n+ * - dbeta: Gradient wrt `b`, of shape (C, 1).\n+ *\n+ */\n+ # Compute gradients during training\n+ dgamma = util::channel_sums(dout*X, C, Hin, Win) # shape (C, 1)\n+ dbeta = util::channel_sums(dout, C, Hin, Win) # shape (C, 1)\n+ dX = bias_multiply(dout, gamma) # shape (N, C*Hin*Win)\n+}\n+\n+init = function(int C)\n+ return (matrix[double] gamma, matrix[double] beta) {\n+ /*\n+ * Initialize the parameters of this layer.\n+ *\n+ * By default, we initialize to an identity function, with a scale\n+ * filler of `1`, and a shift filler of `0`.\n+ *\n+ * Note: This is just a convenience function, and parameters\n+ * may be initialized manually if needed.\n+ 
*\n+ * Inputs:\n+ * - C: Number of input channels (dimensionality of input depth).\n+ *\n+ * Outputs:\n+ * - gamma: Scale parameters, of shape (C, 1).\n+ * - beta: Shift parameters, of shape (C, 1).\n+ */\n+ gamma = matrix(1, rows=C, cols=1)\n+ beta = matrix(0, rows=C, cols=1)\n+}\n+\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/nn/test/grad_check.dml",
"new_path": "scripts/staging/SystemML-NN/nn/test/grad_check.dml",
"diff": "@@ -39,6 +39,8 @@ source(\"nn/layers/max_pool2d.dml\") as max_pool2d\nsource(\"nn/layers/max_pool2d_builtin.dml\") as max_pool2d_builtin\nsource(\"nn/layers/relu.dml\") as relu\nsource(\"nn/layers/rnn.dml\") as rnn\n+source(\"nn/layers/scale_shift1d.dml\") as scale_shift1d\n+source(\"nn/layers/scale_shift2d.dml\") as scale_shift2d\nsource(\"nn/layers/sigmoid.dml\") as sigmoid\nsource(\"nn/layers/softmax.dml\") as softmax\nsource(\"nn/layers/tanh.dml\") as tanh\n@@ -229,6 +231,113 @@ batch_norm1d = function() {\n}\n}\n+batch_norm2d = function() {\n+ /*\n+ * Gradient check for the 2D (spatial) batch normalization layer.\n+ */\n+ print(\"Grad checking the 2D (spatial) batch normalization layer with L2 loss.\")\n+\n+ # Generate data\n+ N = 3 # num examples\n+ C = 2 # num channels\n+ Hin = 5 # input height\n+ Win = 5 # input width\n+ mu = 0.9 # momentum\n+ eps = 1e-5 # epsilon\n+ X = rand(rows=N, cols=C*Hin*Win)\n+ y = rand(rows=N, cols=C*Hin*Win)\n+ gamma = rand(rows=C, cols=1)\n+ beta = rand(rows=C, cols=1)\n+ ema_mean = rand(rows=C, cols=1)\n+ ema_var = rand(rows=C, cols=1)\n+ #[dummy, dummy, ema_mean, ema_var] = batch_norm2d::init(C)\n+\n+ # Check training & testing modes\n+ for (i in 1:2) {\n+ if (i == 1)\n+ mode = 'train'\n+ else\n+ mode = 'test'\n+ print(\" - Grad checking the '\"+mode+\"' mode.\")\n+\n+ # Compute analytical gradients of loss wrt parameters\n+ [out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ dout = l2_loss::backward(out, y)\n+ [dX, dgamma, dbeta] = batch_norm2d::backward(dout, out, ema_mean_upd, ema_var_upd,\n+ cache_mean, cache_var, cache_norm,\n+ X, gamma, beta, C, Hin, Win, mode,\n+ ema_mean, ema_var, mu, eps)\n+\n+ # Grad check\n+ h = 1e-5\n+ print(\" - Grad checking X.\")\n+ for (i in 1:nrow(X)) {\n+ for (j in 1:ncol(X)) {\n+ # Compute numerical derivative\n+ old = as.scalar(X[i,j])\n+ X[i,j] = old - h\n+ [outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ lossmh = l2_loss::forward(outmh, y)\n+ X[i,j] = old + h\n+ [outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ lossph = l2_loss::forward(outph, y)\n+ X[i,j] = old # reset\n+ dX_num = (lossph-lossmh) / (2*h) # numerical derivative\n+\n+ # Check error\n+ rel_error = test_util::check_rel_grad_error(as.scalar(dX[i,j]), dX_num, lossph, lossmh)\n+ }\n+ }\n+\n+ print(\" - Grad checking gamma.\")\n+ for (i in 1:nrow(gamma)) {\n+ for (j in 1:ncol(gamma)) {\n+ # Compute numerical derivative\n+ old = as.scalar(gamma[i,j])\n+ gamma[i,j] = old - h\n+ [outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ lossmh = l2_loss::forward(outmh, y)\n+ gamma[i,j] = old + h\n+ [outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ lossph = l2_loss::forward(outph, y)\n+ gamma[i,j] = old # reset\n+ dgamma_num = (lossph-lossmh) / (2*h) # numerical derivative\n+\n+ # Check error\n+ rel_error = test_util::check_rel_grad_error(as.scalar(dgamma[i,j]), dgamma_num,\n+ lossph, lossmh)\n+ }\n+ }\n+\n+ print(\" - Grad checking beta.\")\n+ for (i in 1:nrow(beta)) {\n+ for (j in 1:ncol(beta)) {\n+ # 
Compute numerical derivative\n+ old = as.scalar(beta[i,j])\n+ beta[i,j] = old - h\n+ [outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ lossmh = l2_loss::forward(outmh, y)\n+ beta[i,j] = old + h\n+ [outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n+ batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ lossph = l2_loss::forward(outph, y)\n+ beta[i,j] = old # reset\n+ dbeta_num = (lossph-lossmh) / (2*h) # numerical derivative\n+\n+ # Check error\n+ rel_error = test_util::check_rel_grad_error(as.scalar(dbeta[i,j]), dbeta_num,\n+ lossph, lossmh)\n+ }\n+ }\n+ }\n+}\n+\nconv2d = function() {\n/*\n* Gradient check for the 2D convolutional layer using `im2col`.\n@@ -1199,34 +1308,36 @@ rnn = function() {\n}\n}\n-sigmoid = function() {\n+scale_shift1d = function() {\n/*\n- * Gradient check for the sigmoid nonlinearity layer.\n+ * Gradient check for the 1D scale & shift layer.\n*/\n- print(\"Grad checking the sigmoid nonlinearity layer with L2 loss.\")\n+ print(\"Grad checking the 1D scale & shift layer with L2 loss.\")\n# Generate data\nN = 3 # num examples\n- M = 10 # num neurons\n- X = rand(rows=N, cols=M)\n- y = rand(rows=N, cols=M)\n+ D = 100 # num features\n+ X = rand(rows=N, cols=D)\n+ y = rand(rows=N, cols=D)\n+ [gamma, beta] = scale_shift1d::init(D)\n# Compute analytical gradients of loss wrt parameters\n- out = sigmoid::forward(X)\n+ out = scale_shift1d::forward(X, gamma, beta)\ndout = l2_loss::backward(out, y)\n- dX = sigmoid::backward(dout, X)\n+ [dX, dgamma, dbeta] = scale_shift1d::backward(dout, out, X, gamma, beta)\n# Grad check\nh = 1e-5\n+ print(\" - Grad checking X.\")\nfor (i in 1:nrow(X)) {\nfor (j in 1:ncol(X)) {\n# Compute numerical derivative\nold = as.scalar(X[i,j])\nX[i,j] = old - h\n- outmh = sigmoid::forward(X)\n+ outmh = scale_shift1d::forward(X, gamma, beta)\nlossmh = l2_loss::forward(outmh, y)\nX[i,j] = old + h\n- outph = sigmoid::forward(X)\n+ outph = scale_shift1d::forward(X, gamma, beta)\nlossph = l2_loss::forward(outph, y)\nX[i,j] = old # reset\ndX_num = (lossph-lossmh) / (2*h) # numerical derivative\n@@ -1235,85 +1346,67 @@ sigmoid = function() {\nrel_error = test_util::check_rel_grad_error(as.scalar(dX[i,j]), dX_num, lossph, lossmh)\n}\n}\n-}\n-\n-softmax = function() {\n- /*\n- * Gradient check for the softmax layer.\n- */\n- print(\"Grad checking the softmax layer with L2 loss.\")\n- # Generate data\n- N = 3 # num examples\n- D = 10 # num classes\n- X = rand(rows=N, cols=D)\n- y = rand(rows=N, cols=D, min=0, max=1, pdf=\"uniform\")\n- y = y / rowSums(y)\n+ print(\" - Grad checking gamma.\")\n+ for (i in 1:nrow(gamma)) {\n+ for (j in 1:ncol(gamma)) {\n+ # Compute numerical derivative\n+ old = as.scalar(gamma[i,j])\n+ gamma[i,j] = old - h\n+ outmh = scale_shift1d::forward(X, gamma, beta)\n+ lossmh = l2_loss::forward(outmh, y)\n+ gamma[i,j] = old + h\n+ outph = scale_shift1d::forward(X, gamma, beta)\n+ lossph = l2_loss::forward(outph, y)\n+ gamma[i,j] = old # reset\n+ dgamma_num = (lossph-lossmh) / (2*h) # numerical derivative\n- # Compute analytical gradients of loss wrt parameters\n- out = softmax::forward(X)\n- dout = l2_loss::backward(out, y)\n- dX = softmax::backward(dout, X)\n+ # Check error\n+ rel_error = test_util::check_rel_grad_error(as.scalar(dgamma[i,j]), dgamma_num,\n+ lossph, lossmh)\n+ }\n+ }\n- # Grad check\n- h = 1e-5\n- for (i in 1:nrow(X)) {\n- for (j in 1:ncol(X)) {\n+ print(\" 
- Grad checking beta.\")\n+ for (i in 1:nrow(beta)) {\n+ for (j in 1:ncol(beta)) {\n# Compute numerical derivative\n- old = as.scalar(X[i,j])\n- X[i,j] = old - h\n- outmh = softmax::forward(X)\n+ old = as.scalar(beta[i,j])\n+ beta[i,j] = old - h\n+ outmh = scale_shift1d::forward(X, gamma, beta)\nlossmh = l2_loss::forward(outmh, y)\n- X[i,j] = old + h\n- outph = softmax::forward(X)\n+ beta[i,j] = old + h\n+ outph = scale_shift1d::forward(X, gamma, beta)\nlossph = l2_loss::forward(outph, y)\n- X[i,j] = old # reset\n- dX_num = (lossph-lossmh) / (2*h) # numerical derivative\n+ beta[i,j] = old # reset\n+ dbeta_num = (lossph-lossmh) / (2*h) # numerical derivative\n# Check error\n- rel_error = test_util::check_rel_grad_error(as.scalar(dX[i,j]), dX_num, lossph, lossmh)\n+ rel_error = test_util::check_rel_grad_error(as.scalar(dbeta[i,j]), dbeta_num,\n+ lossph, lossmh)\n}\n}\n}\n-batch_norm2d = function() {\n+scale_shift2d = function() {\n/*\n- * Gradient check for the 2D (spatial) batch normalization layer.\n+ * Gradient check for the 2D scale & shift layer.\n*/\n- print(\"Grad checking the 2D (spatial) batch normalization layer with L2 loss.\")\n+ print(\"Grad checking the 2D scale & shift layer with L2 loss.\")\n# Generate data\nN = 3 # num examples\n- N = 2 # num examples\nC = 2 # num channels\nHin = 5 # input height\nWin = 5 # input width\n- mu = 0.9 # momentum\n- eps = 1e-5 # epsilon\nX = rand(rows=N, cols=C*Hin*Win)\ny = rand(rows=N, cols=C*Hin*Win)\n- gamma = rand(rows=C, cols=1)\n- beta = rand(rows=C, cols=1)\n- ema_mean = rand(rows=C, cols=1)\n- ema_var = rand(rows=C, cols=1)\n- #[dummy, dummy, ema_mean, ema_var] = batch_norm2d::init(C)\n-\n- # Check training & testing modes\n- for (i in 1:2) {\n- if (i == 1)\n- mode = 'train'\n- else\n- mode = 'test'\n- print(\" - Grad checking the '\"+mode+\"' mode.\")\n+ [gamma, beta] = scale_shift2d::init(C)\n# Compute analytical gradients of loss wrt parameters\n- [out, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ out = scale_shift2d::forward(X, gamma, beta, C, Hin, Win)\ndout = l2_loss::backward(out, y)\n- [dX, dgamma, dbeta] = batch_norm2d::backward(dout, out, ema_mean_upd, ema_var_upd,\n- cache_mean, cache_var, cache_norm,\n- X, gamma, beta, C, Hin, Win, mode,\n- ema_mean, ema_var, mu, eps)\n+ [dX, dgamma, dbeta] = scale_shift2d::backward(dout, out, X, gamma, beta, C, Hin, Win)\n# Grad check\nh = 1e-5\n@@ -1323,12 +1416,10 @@ batch_norm2d = function() {\n# Compute numerical derivative\nold = as.scalar(X[i,j])\nX[i,j] = old - h\n- [outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ outmh = scale_shift2d::forward(X, gamma, beta, C, Hin, Win)\nlossmh = l2_loss::forward(outmh, y)\nX[i,j] = old + h\n- [outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ outph = scale_shift2d::forward(X, gamma, beta, C, Hin, Win)\nlossph = l2_loss::forward(outph, y)\nX[i,j] = old # reset\ndX_num = (lossph-lossmh) / (2*h) # numerical derivative\n@@ -1344,12 +1435,10 @@ batch_norm2d = function() {\n# Compute numerical derivative\nold = as.scalar(gamma[i,j])\ngamma[i,j] = old - h\n- [outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ outmh 
= scale_shift2d::forward(X, gamma, beta, C, Hin, Win)\nlossmh = l2_loss::forward(outmh, y)\ngamma[i,j] = old + h\n- [outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ outph = scale_shift2d::forward(X, gamma, beta, C, Hin, Win)\nlossph = l2_loss::forward(outph, y)\ngamma[i,j] = old # reset\ndgamma_num = (lossph-lossmh) / (2*h) # numerical derivative\n@@ -1366,12 +1455,10 @@ batch_norm2d = function() {\n# Compute numerical derivative\nold = as.scalar(beta[i,j])\nbeta[i,j] = old - h\n- [outmh, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ outmh = scale_shift2d::forward(X, gamma, beta, C, Hin, Win)\nlossmh = l2_loss::forward(outmh, y)\nbeta[i,j] = old + h\n- [outph, ema_mean_upd, ema_var_upd, cache_mean, cache_var, cache_norm] =\n- batch_norm2d::forward(X, gamma, beta, C, Hin, Win, mode, ema_mean, ema_var, mu, eps)\n+ outph = scale_shift2d::forward(X, gamma, beta, C, Hin, Win)\nlossph = l2_loss::forward(outph, y)\nbeta[i,j] = old # reset\ndbeta_num = (lossph-lossmh) / (2*h) # numerical derivative\n@@ -1382,6 +1469,82 @@ batch_norm2d = function() {\n}\n}\n}\n+\n+sigmoid = function() {\n+ /*\n+ * Gradient check for the sigmoid nonlinearity layer.\n+ */\n+ print(\"Grad checking the sigmoid nonlinearity layer with L2 loss.\")\n+\n+ # Generate data\n+ N = 3 # num examples\n+ M = 10 # num neurons\n+ X = rand(rows=N, cols=M)\n+ y = rand(rows=N, cols=M)\n+\n+ # Compute analytical gradients of loss wrt parameters\n+ out = sigmoid::forward(X)\n+ dout = l2_loss::backward(out, y)\n+ dX = sigmoid::backward(dout, X)\n+\n+ # Grad check\n+ h = 1e-5\n+ for (i in 1:nrow(X)) {\n+ for (j in 1:ncol(X)) {\n+ # Compute numerical derivative\n+ old = as.scalar(X[i,j])\n+ X[i,j] = old - h\n+ outmh = sigmoid::forward(X)\n+ lossmh = l2_loss::forward(outmh, y)\n+ X[i,j] = old + h\n+ outph = sigmoid::forward(X)\n+ lossph = l2_loss::forward(outph, y)\n+ X[i,j] = old # reset\n+ dX_num = (lossph-lossmh) / (2*h) # numerical derivative\n+\n+ # Check error\n+ rel_error = test_util::check_rel_grad_error(as.scalar(dX[i,j]), dX_num, lossph, lossmh)\n+ }\n+ }\n+}\n+\n+softmax = function() {\n+ /*\n+ * Gradient check for the softmax layer.\n+ */\n+ print(\"Grad checking the softmax layer with L2 loss.\")\n+\n+ # Generate data\n+ N = 3 # num examples\n+ D = 10 # num classes\n+ X = rand(rows=N, cols=D)\n+ y = rand(rows=N, cols=D, min=0, max=1, pdf=\"uniform\")\n+ y = y / rowSums(y)\n+\n+ # Compute analytical gradients of loss wrt parameters\n+ out = softmax::forward(X)\n+ dout = l2_loss::backward(out, y)\n+ dX = softmax::backward(dout, X)\n+\n+ # Grad check\n+ h = 1e-5\n+ for (i in 1:nrow(X)) {\n+ for (j in 1:ncol(X)) {\n+ # Compute numerical derivative\n+ old = as.scalar(X[i,j])\n+ X[i,j] = old - h\n+ outmh = softmax::forward(X)\n+ lossmh = l2_loss::forward(outmh, y)\n+ X[i,j] = old + h\n+ outph = softmax::forward(X)\n+ lossph = l2_loss::forward(outph, y)\n+ X[i,j] = old # reset\n+ dX_num = (lossph-lossmh) / (2*h) # numerical derivative\n+\n+ # Check error\n+ rel_error = test_util::check_rel_grad_error(as.scalar(dX[i,j]), dX_num, lossph, lossmh)\n+ }\n+ }\n}\ntanh = function() {\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/nn/test/run_tests.dml",
"new_path": "scripts/staging/SystemML-NN/nn/test/run_tests.dml",
"diff": "@@ -29,34 +29,39 @@ print(\"\")\nprint(\"Starting grad checks.\")\nprint(\"---\")\n-# Loss functions\n+# Loss & loss-related functions\ntmp = grad_check::cross_entropy_loss()\ntmp = grad_check::l1_loss()\n+tmp = grad_check::l1_reg()\ntmp = grad_check::l2_loss()\n+tmp = grad_check::l2_reg()\ntmp = grad_check::log_loss()\n+print(\"\")\n-# Other layers\n+# Core layers\ntmp = grad_check::affine()\ntmp = grad_check::batch_norm1d()\ntmp = grad_check::batch_norm2d()\n-tmp = grad_check::conv2d_simple()\ntmp = grad_check::conv2d()\ntmp = grad_check::conv2d_builtin()\n+tmp = grad_check::conv2d_simple()\ntmp = grad_check::dropout()\n-tmp = grad_check::l1_reg()\n-tmp = grad_check::l2_reg()\ntmp = grad_check::lstm()\n-tmp = grad_check::max_pool2d_simple()\ntmp = grad_check::max_pool2d()\ntmp = grad_check::max_pool2d_builtin()\n+tmp = grad_check::max_pool2d_simple()\ntmp = grad_check::relu()\ntmp = grad_check::rnn()\n+tmp = grad_check::scale_shift1d()\n+tmp = grad_check::scale_shift2d()\ntmp = grad_check::sigmoid()\ntmp = grad_check::softmax()\ntmp = grad_check::tanh()\n+print(\"\")\n# Example model\ntmp = grad_check::two_layer_affine_l2_net()\n+print(\"\")\nprint(\"---\")\nprint(\"Grad checks complete -- look for any ERRORs or WARNINGs.\")\n@@ -71,11 +76,11 @@ print(\"---\")\ntmp = test::batch_norm1d()\ntmp = test::batch_norm2d()\n-tmp = test::im2col()\n-tmp = test::padding()\ntmp = test::conv2d()\ntmp = test::cross_entropy_loss()\n+tmp = test::im2col()\ntmp = test::max_pool2d()\n+tmp = test::padding()\ntmp = test::tanh()\nprint(\"---\")\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1468] Add new 1D/2D "Scale & Shift" layers
A "Scale & Shift" layer introduces learnable parameters
(`gamma`, `beta`) to scale and shift the input on either
a per-feature basis (1D) or a per-channel basis (2D).
`y = x*gamma + beta`
Closes #453. |
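As a minimal illustration of the layer semantics above (y = x*gamma + beta applied per channel), the following Java sketch computes a 2D scale & shift forward pass over a row-major [N, C*Hin*Win] input; the class and method names are illustrative only and are not part of the SystemML-NN API.

    // Sketch of a per-channel 2D scale & shift forward pass, assuming a
    // row-major layout [N, C*Hin*Win] as used by the DML gradient checks.
    public class ScaleShift2dSketch {
        public static double[][] forward(double[][] X, double[] gamma, double[] beta,
                                         int C, int Hin, int Win) {
            int N = X.length, HW = Hin * Win;
            double[][] out = new double[N][C * HW];
            for (int i = 0; i < N; i++)
                for (int c = 0; c < C; c++)
                    for (int k = 0; k < HW; k++)  // scale and shift by channel c
                        out[i][c * HW + k] = X[i][c * HW + k] * gamma[c] + beta[c];
            return out;
        }
    }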
49,738 | 11.04.2017 20:40:07 | 25,200 | f9f70b3a216a77414d81fe44dcb3a25cbc8a902d | Additional unary/binary codegen row vector primitives
This patch adds compiler and runtime support for the following commonly
used row vector primitives: abs, round, ceil, floor, sign, pow2, mult2,
sqrt, min, max, plus, pow. Furthermore, the patch includes additional
tests for complex rowwise fusion patterns. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeBinary.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeBinary.java",
"diff": "@@ -29,10 +29,12 @@ public class CNodeBinary extends CNode\n{\npublic enum BinType {\nDOT_PRODUCT,\n- VECT_MULT_ADD, VECT_DIV_ADD, VECT_MINUS_ADD,\n+ VECT_MULT_ADD, VECT_DIV_ADD, VECT_MINUS_ADD, VECT_PLUS_ADD,\n+ VECT_POW_ADD, VECT_MIN_ADD, VECT_MAX_ADD,\nVECT_EQUAL_ADD, VECT_NOTEQUAL_ADD, VECT_LESS_ADD,\nVECT_LESSEQUAL_ADD, VECT_GREATER_ADD, VECT_GREATEREQUAL_ADD,\n- VECT_MULT_SCALAR, VECT_DIV_SCALAR, VECT_MINUS_SCALAR,\n+ VECT_MULT_SCALAR, VECT_DIV_SCALAR, VECT_MINUS_SCALAR, VECT_PLUS_SCALAR,\n+ VECT_POW_SCALAR, VECT_MIN_SCALAR, VECT_MAX_SCALAR,\nVECT_EQUAL_SCALAR, VECT_NOTEQUAL_SCALAR, VECT_LESS_SCALAR,\nVECT_LESSEQUAL_SCALAR, VECT_GREATER_SCALAR, VECT_GREATEREQUAL_SCALAR,\nMULT, DIV, PLUS, MINUS, MODULUS, INTDIV,\n@@ -61,6 +63,11 @@ public class CNodeBinary extends CNode\ncase VECT_MULT_ADD:\ncase VECT_DIV_ADD:\n+ case VECT_MINUS_ADD:\n+ case VECT_PLUS_ADD:\n+ case VECT_POW_ADD:\n+ case VECT_MIN_ADD:\n+ case VECT_MAX_ADD:\ncase VECT_EQUAL_ADD:\ncase VECT_NOTEQUAL_ADD:\ncase VECT_LESS_ADD:\n@@ -72,9 +79,13 @@ public class CNodeBinary extends CNode\n\" LibSpoofPrimitives.vect\"+vectName+\"Add(%IN1%, %IN2%, %OUT%, %POS1%, %POSOUT%, %LEN%);\\n\";\n}\n+ case VECT_MULT_SCALAR:\ncase VECT_DIV_SCALAR:\ncase VECT_MINUS_SCALAR:\n- case VECT_MULT_SCALAR:\n+ case VECT_PLUS_SCALAR:\n+ case VECT_POW_SCALAR:\n+ case VECT_MIN_SCALAR:\n+ case VECT_MAX_SCALAR:\ncase VECT_EQUAL_SCALAR:\ncase VECT_NOTEQUAL_SCALAR:\ncase VECT_LESS_SCALAR:\n@@ -133,7 +144,10 @@ public class CNodeBinary extends CNode\n}\n}\npublic boolean isVectorScalarPrimitive() {\n- return this == VECT_DIV_SCALAR || this == VECT_MULT_SCALAR || this == VECT_MINUS_SCALAR\n+ return this == VECT_DIV_SCALAR || this == VECT_MULT_SCALAR\n+ || this == VECT_MINUS_SCALAR || this == VECT_PLUS_SCALAR\n+ || this == VECT_POW_SCALAR\n+ || this == VECT_MIN_SCALAR || this == VECT_MAX_SCALAR\n|| this == VECT_EQUAL_SCALAR || this == VECT_NOTEQUAL_SCALAR\n|| this == VECT_LESS_SCALAR || this == VECT_LESSEQUAL_SCALAR\n|| this == VECT_GREATER_SCALAR || this == VECT_GREATEREQUAL_SCALAR;\n@@ -215,6 +229,10 @@ public class CNodeBinary extends CNode\ncase VECT_MULT_ADD: return \"b(vma)\";\ncase VECT_DIV_ADD: return \"b(vda)\";\ncase VECT_MINUS_ADD: return \"b(vmia)\";\n+ case VECT_PLUS_ADD: return \"b(vpa)\";\n+ case VECT_POW_ADD: return \"b(vpowa)\";\n+ case VECT_MIN_ADD: return \"b(vmina)\";\n+ case VECT_MAX_ADD: return \"b(vmaxa)\";\ncase VECT_EQUAL_ADD: return \"b(veqa)\";\ncase VECT_NOTEQUAL_ADD: return \"b(vneqa)\";\ncase VECT_LESS_ADD: return \"b(vlta)\";\n@@ -224,6 +242,10 @@ public class CNodeBinary extends CNode\ncase VECT_MULT_SCALAR: return \"b(vm)\";\ncase VECT_DIV_SCALAR: return \"b(vd)\";\ncase VECT_MINUS_SCALAR: return \"b(vmi)\";\n+ case VECT_PLUS_SCALAR: return \"b(vp)\";\n+ case VECT_POW_SCALAR: return \"b(vpow)\";\n+ case VECT_MIN_SCALAR: return \"b(vmin)\";\n+ case VECT_MAX_SCALAR: return \"b(vmax)\";\ncase VECT_EQUAL_SCALAR: return \"b(veq)\";\ncase VECT_NOTEQUAL_SCALAR: return \"b(vneq)\";\ncase VECT_LESS_SCALAR: return \"b(vlt)\";\n@@ -259,6 +281,10 @@ public class CNodeBinary extends CNode\ncase VECT_MULT_ADD:\ncase VECT_DIV_ADD:\ncase VECT_MINUS_ADD:\n+ case VECT_PLUS_ADD:\n+ case VECT_POW_ADD:\n+ case VECT_MIN_ADD:\n+ case VECT_MAX_ADD:\ncase VECT_EQUAL_ADD:\ncase VECT_NOTEQUAL_ADD:\ncase VECT_LESS_ADD:\n@@ -273,6 +299,10 @@ public class CNodeBinary extends CNode\ncase VECT_DIV_SCALAR:\ncase VECT_MULT_SCALAR:\ncase VECT_MINUS_SCALAR:\n+ case VECT_PLUS_SCALAR:\n+ case VECT_POW_SCALAR:\n+ case VECT_MIN_SCALAR:\n+ case 
VECT_MAX_SCALAR:\ncase VECT_EQUAL_SCALAR:\ncase VECT_NOTEQUAL_SCALAR:\ncase VECT_LESS_SCALAR:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java",
"diff": "@@ -30,7 +30,8 @@ public class CNodeUnary extends CNode\npublic enum UnaryType {\nLOOKUP_R, LOOKUP_RC, LOOKUP0, //codegen specific\nROW_SUMS, ROW_MINS, ROW_MAXS, //codegen specific\n- VECT_EXP_SCALAR, VECT_LOG_SCALAR,\n+ VECT_EXP, VECT_POW2, VECT_MULT2, VECT_SQRT, VECT_LOG,\n+ VECT_ABS, VECT_ROUND, VECT_CEIL, VECT_FLOOR, VECT_SIGN,\nEXP, POW2, MULT2, SQRT, LOG, LOG_NZ,\nABS, ROUND, CEIL, FLOOR, SIGN,\nSIN, COS, TAN, ASIN, ACOS, ATAN,\n@@ -53,8 +54,16 @@ public class CNodeUnary extends CNode\n\" double %TMP% = LibSpoofPrimitives.vect\"+vectName+\"(%IN1%, %POS1%, %LEN%);\\n\";\n}\n- case VECT_EXP_SCALAR:\n- case VECT_LOG_SCALAR: {\n+ case VECT_EXP:\n+ case VECT_POW2:\n+ case VECT_MULT2:\n+ case VECT_SQRT:\n+ case VECT_LOG:\n+ case VECT_ABS:\n+ case VECT_ROUND:\n+ case VECT_CEIL:\n+ case VECT_FLOOR:\n+ case VECT_SIGN: {\nString vectName = getVectorPrimitiveName();\nreturn sparse ? \" double[] %TMP% = LibSpoofPrimitives.vect\"+vectName+\"Write(%IN1v%, %IN1i%, %POS1%, %LEN%);\\n\" :\n\" double[] %TMP% = LibSpoofPrimitives.vect\"+vectName+\"Write(%IN1%, %POS1%, %LEN%);\\n\";\n@@ -112,8 +121,11 @@ public class CNodeUnary extends CNode\n}\n}\npublic boolean isVectorScalarPrimitive() {\n- return this == UnaryType.VECT_EXP_SCALAR\n- || this == UnaryType.VECT_LOG_SCALAR;\n+ return this == VECT_EXP || this == VECT_POW2\n+ || this == VECT_MULT2 || this == VECT_SQRT\n+ || this == VECT_LOG || this == VECT_ABS\n+ || this == VECT_ROUND || this == VECT_CEIL\n+ || this == VECT_FLOOR || this == VECT_SIGN;\n}\npublic UnaryType getVectorAddPrimitive() {\nreturn UnaryType.valueOf(\"VECT_\"+getVectorPrimitiveName().toUpperCase()+\"_ADD\");\n@@ -184,8 +196,16 @@ public class CNodeUnary extends CNode\ncase ROW_SUMS: return \"u(R+)\";\ncase ROW_MINS: return \"u(Rmin)\";\ncase ROW_MAXS: return \"u(Rmax)\";\n- case VECT_EXP_SCALAR: return \"u(vexp)\";\n- case VECT_LOG_SCALAR: return \"u(vlog)\";\n+ case VECT_EXP:\n+ case VECT_POW2:\n+ case VECT_MULT2:\n+ case VECT_SQRT:\n+ case VECT_LOG:\n+ case VECT_ABS:\n+ case VECT_ROUND:\n+ case VECT_CEIL:\n+ case VECT_FLOOR:\n+ case VECT_SIGN: return \"u(v\"+_type.name().toLowerCase()+\")\";\ncase LOOKUP_R: return \"u(ixr)\";\ncase LOOKUP_RC: return \"u(ixrc)\";\ncase LOOKUP0: return \"u(ix0)\";\n@@ -197,8 +217,16 @@ public class CNodeUnary extends CNode\n@Override\npublic void setOutputDims() {\nswitch(_type) {\n- case VECT_EXP_SCALAR:\n- case VECT_LOG_SCALAR:\n+ case VECT_EXP:\n+ case VECT_POW2:\n+ case VECT_MULT2:\n+ case VECT_SQRT:\n+ case VECT_LOG:\n+ case VECT_ABS:\n+ case VECT_ROUND:\n+ case VECT_CEIL:\n+ case VECT_FLOOR:\n+ case VECT_SIGN:\n_rows = _inputs.get(0)._rows;\n_cols = _inputs.get(0)._cols;\n_dataType= DataType.MATRIX;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java",
"diff": "@@ -57,8 +57,10 @@ import org.apache.sysml.runtime.matrix.data.Pair;\npublic class TemplateRow extends TemplateBase\n{\nprivate static final Hop.AggOp[] SUPPORTED_ROW_AGG = new AggOp[]{AggOp.SUM, AggOp.MIN, AggOp.MAX};\n- private static final Hop.OpOp1[] SUPPORTED_VECT_UNARY = new OpOp1[]{OpOp1.EXP, OpOp1.LOG};\n- private static final Hop.OpOp2[] SUPPORTED_VECT_BINARY = new OpOp2[]{OpOp2.MULT, OpOp2.DIV, OpOp2.MINUS,\n+ private static final Hop.OpOp1[] SUPPORTED_VECT_UNARY = new OpOp1[]{\n+ OpOp1.EXP, OpOp1.SQRT, OpOp1.LOG, OpOp1.ABS, OpOp1.ROUND, OpOp1.CEIL, OpOp1.FLOOR, OpOp1.SIGN};\n+ private static final Hop.OpOp2[] SUPPORTED_VECT_BINARY = new OpOp2[]{\n+ OpOp2.MULT, OpOp2.DIV, OpOp2.MINUS, OpOp2.PLUS, OpOp2.POW, OpOp2.MIN, OpOp2.MAX,\nOpOp2.EQUAL, OpOp2.NOTEQUAL, OpOp2.LESS, OpOp2.LESSEQUAL, OpOp2.GREATER, OpOp2.GREATEREQUAL};\npublic TemplateRow() {\n@@ -216,7 +218,7 @@ public class TemplateRow extends TemplateBase\nif(hop.getInput().get(0).getDim1() > 1 && hop.getInput().get(0).getDim2() > 1 )\n{\nif( HopRewriteUtils.isUnary(hop, SUPPORTED_VECT_UNARY) ) {\n- String opname = \"VECT_\"+((UnaryOp)hop).getOp().name()+\"_SCALAR\";\n+ String opname = \"VECT_\"+((UnaryOp)hop).getOp().name();\nout = new CNodeUnary(cdata1, UnaryType.valueOf(opname));\n}\nelse\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/codegen/LibSpoofPrimitives.java",
"new_path": "src/main/java/org/apache/sysml/runtime/codegen/LibSpoofPrimitives.java",
"diff": "@@ -202,6 +202,110 @@ public class LibSpoofPrimitives\nreturn c;\n}\n+ //custom vector plus\n+\n+ public static void vectPlusAdd(double[] a, double bval, double[] c, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++, ci++)\n+ c[ci] += a[j] + bval;\n+ }\n+\n+ public static void vectPlusAdd(double[] a, double bval, double[] c, int[] aix, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++ )\n+ c[ci + aix[j]] += a[j] + bval;\n+ }\n+\n+ public static double[] vectPlusWrite(double[] a, double bval, int ai, int len) {\n+ double[] c = allocVector(len, false);\n+ for( int j = 0; j < len; j++, ai++)\n+ c[j] = a[ai] + bval;\n+ return c;\n+ }\n+\n+ public static double[] vectPlusWrite(double[] a, double bval, int[] aix, int ai, int len) {\n+ double[] c = allocVector(len, true);\n+ for( int j = ai; j < ai+len; j++ )\n+ c[aix[j]] = a[j] + bval;\n+ return c;\n+ }\n+\n+ //custom vector pow\n+\n+ public static void vectPowAdd(double[] a, double bval, double[] c, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++, ci++)\n+ c[ci] += Math.pow(a[j], bval);\n+ }\n+\n+ public static void vectPowAdd(double[] a, double bval, double[] c, int[] aix, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++ )\n+ c[ci + aix[j]] += Math.pow(a[j], bval);\n+ }\n+\n+ public static double[] vectPowWrite(double[] a, double bval, int ai, int len) {\n+ double[] c = allocVector(len, false);\n+ for( int j = 0; j < len; j++, ai++)\n+ c[j] = Math.pow(a[ai], bval);\n+ return c;\n+ }\n+\n+ public static double[] vectPowWrite(double[] a, double bval, int[] aix, int ai, int len) {\n+ double[] c = allocVector(len, true);\n+ for( int j = ai; j < ai+len; j++ )\n+ c[aix[j]] = Math.pow(a[j], bval);\n+ return c;\n+ }\n+\n+ //custom vector min\n+\n+ public static void vectMinAdd(double[] a, double bval, double[] c, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++, ci++)\n+ c[ci] += Math.min(a[j], bval);\n+ }\n+\n+ public static void vectMinAdd(double[] a, double bval, double[] c, int[] aix, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++ )\n+ c[ci + aix[j]] += Math.min(a[j], bval);\n+ }\n+\n+ public static double[] vectMinWrite(double[] a, double bval, int ai, int len) {\n+ double[] c = allocVector(len, false);\n+ for( int j = 0; j < len; j++, ai++)\n+ c[j] = Math.min(a[ai], bval);\n+ return c;\n+ }\n+\n+ public static double[] vectMinWrite(double[] a, double bval, int[] aix, int ai, int len) {\n+ double[] c = allocVector(len, true);\n+ for( int j = ai; j < ai+len; j++ )\n+ c[aix[j]] = Math.min(a[j], bval);\n+ return c;\n+ }\n+\n+ //custom vector max\n+\n+ public static void vectMaxAdd(double[] a, double bval, double[] c, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++, ci++)\n+ c[ci] += Math.max(a[j], bval);\n+ }\n+\n+ public static void vectMaxAdd(double[] a, double bval, double[] c, int[] aix, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++ )\n+ c[ci + aix[j]] += Math.max(a[j], bval);\n+ }\n+\n+ public static double[] vectMaxWrite(double[] a, double bval, int ai, int len) {\n+ double[] c = allocVector(len, false);\n+ for( int j = 0; j < len; j++, ai++)\n+ c[j] = Math.max(a[ai], bval);\n+ return c;\n+ }\n+\n+ public static double[] vectMaxWrite(double[] a, double bval, int[] aix, int ai, int len) {\n+ double[] c = allocVector(len, true);\n+ for( int j = ai; j < ai+len; j++ )\n+ c[aix[j]] = Math.max(a[j], bval);\n+ return c;\n+ }\n+\n//custom exp\npublic static void vectExpAdd(double[] a, double[] c, int ai, int ci, int len) {\n@@ 
-254,6 +358,214 @@ LibSpoofPrimitives\nreturn c;\n}\n+\n+ //custom abs\n+\n+ public static void vectAbsAdd(double[] a, double[] c, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++, ci++)\n+ c[ci] += Math.abs(a[j]);\n+ }\n+\n+ public static void vectAbsAdd(double[] a, double[] c, int[] aix, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++ )\n+ c[ci + aix[j]] += Math.abs(a[j]);\n+ }\n+\n+ public static double[] vectAbsWrite(double[] a, int ai, int len) {\n+ double[] c = allocVector(len, false);\n+ for( int j = 0; j < len; j++, ai++)\n+ c[j] = Math.abs(a[ai]);\n+ return c;\n+ }\n+\n+ public static double[] vectAbsWrite(double[] a, int[] aix, int ai, int len) {\n+ double[] c = allocVector(len, true);\n+ for( int j = ai; j < ai+len; j++ )\n+ c[aix[j]] = Math.abs(a[j]);\n+ return c;\n+ }\n+\n+ //custom round\n+\n+ public static void vectRoundAdd(double[] a, double[] c, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++, ci++)\n+ c[ci] += Math.round(a[j]);\n+ }\n+\n+ public static void vectRoundAdd(double[] a, double[] c, int[] aix, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++ )\n+ c[ci + aix[j]] += Math.round(a[j]);\n+ }\n+\n+ public static double[] vectRoundWrite(double[] a, int ai, int len) {\n+ double[] c = allocVector(len, false);\n+ for( int j = 0; j < len; j++, ai++)\n+ c[j] = Math.round(a[ai]);\n+ return c;\n+ }\n+\n+ public static double[] vectRoundWrite(double[] a, int[] aix, int ai, int len) {\n+ double[] c = allocVector(len, true);\n+ for( int j = ai; j < ai+len; j++ )\n+ c[aix[j]] = Math.round(a[j]);\n+ return c;\n+ }\n+\n+ //custom ceil\n+\n+ public static void vectCeilAdd(double[] a, double[] c, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++, ci++)\n+ c[ci] += FastMath.ceil(a[j]);\n+ }\n+\n+ public static void vectCeilAdd(double[] a, double[] c, int[] aix, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++ )\n+ c[ci + aix[j]] += FastMath.ceil(a[j]);\n+ }\n+\n+ public static double[] vectCeilWrite(double[] a, int ai, int len) {\n+ double[] c = allocVector(len, false);\n+ for( int j = 0; j < len; j++, ai++)\n+ c[j] = FastMath.ceil(a[ai]);\n+ return c;\n+ }\n+\n+ public static double[] vectCeilWrite(double[] a, int[] aix, int ai, int len) {\n+ double[] c = allocVector(len, true);\n+ for( int j = ai; j < ai+len; j++ )\n+ c[aix[j]] = FastMath.ceil(a[j]);\n+ return c;\n+ }\n+\n+ //custom floor\n+\n+ public static void vectFloorAdd(double[] a, double[] c, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++, ci++)\n+ c[ci] += FastMath.floor(a[j]);\n+ }\n+\n+ public static void vectFloorAdd(double[] a, double[] c, int[] aix, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++ )\n+ c[ci + aix[j]] += FastMath.floor(a[j]);\n+ }\n+\n+ public static double[] vectFloorWrite(double[] a, int ai, int len) {\n+ double[] c = allocVector(len, false);\n+ for( int j = 0; j < len; j++, ai++)\n+ c[j] = FastMath.floor(a[ai]);\n+ return c;\n+ }\n+\n+ public static double[] vectFloorWrite(double[] a, int[] aix, int ai, int len) {\n+ double[] c = allocVector(len, true);\n+ for( int j = ai; j < ai+len; j++ )\n+ c[aix[j]] = FastMath.floor(a[j]);\n+ return c;\n+ }\n+\n+ //custom sign\n+\n+ public static void vectSignAdd(double[] a, double[] c, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++, ci++)\n+ c[ci] += FastMath.signum(a[j]);\n+ }\n+\n+ public static void vectSignAdd(double[] a, double[] c, int[] aix, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++ )\n+ c[ci + 
aix[j]] += FastMath.signum(a[j]);\n+ }\n+\n+ public static double[] vectSignWrite(double[] a, int ai, int len) {\n+ double[] c = allocVector(len, false);\n+ for( int j = 0; j < len; j++, ai++)\n+ c[j] = FastMath.signum(a[ai]);\n+ return c;\n+ }\n+\n+ public static double[] vectSignWrite(double[] a, int[] aix, int ai, int len) {\n+ double[] c = allocVector(len, true);\n+ for( int j = ai; j < ai+len; j++ )\n+ c[aix[j]] = FastMath.signum(a[j]);\n+ return c;\n+ }\n+\n+ //custom pow2\n+\n+ public static void vectPow2Add(double[] a, double[] c, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++, ci++)\n+ c[ci] += a[j] * a[j];\n+ }\n+\n+ public static void vectPow2Add(double[] a, double[] c, int[] aix, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++ )\n+ c[ci + aix[j]] += a[j] * a[j];\n+ }\n+\n+ public static double[] vectPow2Write(double[] a, int ai, int len) {\n+ double[] c = allocVector(len, false);\n+ for( int j = 0; j < len; j++, ai++)\n+ c[j] = a[ai] * a[ai];\n+ return c;\n+ }\n+\n+ public static double[] vectPow2Write(double[] a, int[] aix, int ai, int len) {\n+ double[] c = allocVector(len, true);\n+ for( int j = ai; j < ai+len; j++ )\n+ c[aix[j]] = a[j] * a[j];\n+ return c;\n+ }\n+\n+ //custom mult2\n+\n+ public static void vectMult2Add(double[] a, double[] c, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++, ci++)\n+ c[ci] += a[j] + a[j];\n+ }\n+\n+ public static void vectMult2Add(double[] a, double[] c, int[] aix, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++ )\n+ c[ci + aix[j]] += a[j] + a[j];\n+ }\n+\n+ public static double[] vectMult2Write(double[] a, int ai, int len) {\n+ double[] c = allocVector(len, false);\n+ for( int j = 0; j < len; j++, ai++)\n+ c[j] = a[ai] + a[ai];\n+ return c;\n+ }\n+\n+ public static double[] vectMult2Write(double[] a, int[] aix, int ai, int len) {\n+ double[] c = allocVector(len, true);\n+ for( int j = ai; j < ai+len; j++ )\n+ c[aix[j]] = a[j] + a[j];\n+ return c;\n+ }\n+\n+ //custom sqrt\n+\n+ public static void vectSqrtAdd(double[] a, double[] c, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++, ci++)\n+ c[ci] += Math.sqrt(a[j]);\n+ }\n+\n+ public static void vectSqrtAdd(double[] a, double[] c, int[] aix, int ai, int ci, int len) {\n+ for( int j = ai; j < ai+len; j++ )\n+ c[ci + aix[j]] += Math.sqrt(a[j]);\n+ }\n+\n+ public static double[] vectSqrtWrite(double[] a, int ai, int len) {\n+ double[] c = allocVector(len, false);\n+ for( int j = 0; j < len; j++, ai++)\n+ c[j] = Math.sqrt(a[ai]);\n+ return c;\n+ }\n+\n+ public static double[] vectSqrtWrite(double[] a, int[] aix, int ai, int len) {\n+ double[] c = allocVector(len, true);\n+ for( int j = ai; j < ai+len; j++ )\n+ c[aix[j]] = Math.sqrt(a[j]);\n+ return c;\n+ }\n+\n//custom vector equal\npublic static void vectEqualAdd(double[] a, double bval, double[] c, int ai, int ci, int len) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowAggTmplTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowAggTmplTest.java",
"diff": "@@ -49,6 +49,8 @@ public class RowAggTmplTest extends AutomatedTestBase\nprivate static final String TEST_NAME11 = TEST_NAME+\"11\"; //y - X %*% v\nprivate static final String TEST_NAME12 = TEST_NAME+\"12\"; //Y=(X>=v); R=Y/rowSums(Y)\nprivate static final String TEST_NAME13 = TEST_NAME+\"13\"; //rowSums(X)+rowSums(Y)\n+ private static final String TEST_NAME14 = TEST_NAME+\"14\"; //colSums(max(floor(round(abs(min(sign(X+Y),1)))),7))\n+ private static final String TEST_NAME15 = TEST_NAME+\"15\"; //systemml nn - softmax backward (partially)\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + RowAggTmplTest.class.getSimpleName() + \"/\";\n@@ -60,7 +62,7 @@ public class RowAggTmplTest extends AutomatedTestBase\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- for(int i=1; i<=13; i++)\n+ for(int i=1; i<=15; i++)\naddTestConfiguration( TEST_NAME+i, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME+i, new String[] { String.valueOf(i) }) );\n}\n@@ -259,6 +261,36 @@ public class RowAggTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME13, false, ExecType.SPARK );\n}\n+ @Test\n+ public void testCodegenRowAggRewrite14CP() {\n+ testCodegenIntegration( TEST_NAME14, true, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenRowAgg14CP() {\n+ testCodegenIntegration( TEST_NAME14, false, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenRowAgg14SP() {\n+ testCodegenIntegration( TEST_NAME14, false, ExecType.SPARK );\n+ }\n+\n+ @Test\n+ public void testCodegenRowAggRewrite15CP() {\n+ testCodegenIntegration( TEST_NAME15, true, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenRowAgg15CP() {\n+ testCodegenIntegration( TEST_NAME15, false, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenRowAgg15SP() {\n+ testCodegenIntegration( TEST_NAME15, false, ExecType.SPARK );\n+ }\n+\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\nboolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/rowAggPattern14.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+library(\"matrixStats\")\n+\n+\n+X = matrix(seq(1,1500), 150, 10, byrow=TRUE);\n+y = seq(1,150);\n+\n+Z = pmax(floor(round(abs(pmin(sign(X+y),1)))),7);\n+R = t(colSums(Z));\n+\n+writeMM(as(R, \"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/rowAggPattern14.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(seq(1,1500), rows=150, cols=10);\n+y = seq(1,150);\n+\n+Z = max(floor(round(abs(min(sign(X+y),1)))),7)\n+R = colSums(Z);\n+\n+write(R, $1)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/rowAggPattern15.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+library(\"matrixStats\")\n+\n+\n+X = matrix(seq(1,1500), 150, 10, byrow=TRUE);\n+\n+Y1 = X - rowMaxs(X)\n+Y2 = exp(Y1)\n+Y3 = Y2 / rowSums(Y2)\n+R = Y3 * rowSums(Y3)\n+\n+writeMM(as(R, \"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/rowAggPattern15.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(seq(1,1500), rows=150, cols=10);\n+\n+Y1 = X - rowMaxs(X)\n+Y2 = exp(Y1)\n+Y3 = Y2 / rowSums(Y2)\n+R = Y3 * rowSums(Y3)\n+\n+write(R, $1)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1513] Additional unary/binary codegen row vector primitives
This patch adds compiler and runtime support for the following commonly
used row vector primitives: abs, round, ceil, floor, sign, pow2, mult2,
sqrt, min, max, plus, pow. Furthermore, the patch includes additional
tests for complex rowwise fusion patterns. |
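Each of these primitives follows one calling convention: an accumulating 'Add' variant that adds into an existing output row, and an allocating 'Write' variant, each for dense and sparse inputs. The standalone Java sketch below mirrors that pattern for abs on a dense row segment; it is an illustration, not the LibSpoofPrimitives code itself.

    import java.util.Arrays;

    // Standalone illustration of the Add/Write primitive pattern.
    public class VectPrimitivePattern {
        // 'Add' form: accumulate abs(a[ai..ai+len)) into c starting at ci
        static void vectAbsAdd(double[] a, double[] c, int ai, int ci, int len) {
            for (int j = ai; j < ai + len; j++, ci++)
                c[ci] += Math.abs(a[j]);
        }
        // 'Write' form: allocate and return a fresh output vector
        static double[] vectAbsWrite(double[] a, int ai, int len) {
            double[] c = new double[len];
            for (int j = 0; j < len; j++, ai++)
                c[j] = Math.abs(a[ai]);
            return c;
        }
        public static void main(String[] args) {
            double[] row = {-1.5, 2.0, -3.0};
            System.out.println(Arrays.toString(vectAbsWrite(row, 0, 3))); // [1.5, 2.0, 3.0]
        }
    }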
49,738 | 12.04.2017 22:29:47 | 25,200 | a2c1e75975674f7f2093f6465b8c913565addca8 | [MINOR] Fix codegen multi-aggregate min/max result initialization
This patch fixes an issue with min/max result initialization in
the case of multi-threaded execution, which was not caught by the
test suite before because multi-threading is only applied to inputs w/
>1M cells. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofMultiAggregate.java",
"new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofMultiAggregate.java",
"diff": "@@ -85,6 +85,7 @@ public abstract class SpoofMultiAggregate extends SpoofOperator implements Seria\nout.reset(1, _aggOps.length, false);\nout.allocateDenseBlock();\ndouble[] c = out.getDenseBlock();\n+ setInitialOutputValues(c);\n//input preparation\ndouble[][] b = prepInputMatrices(inputs);\n@@ -94,7 +95,6 @@ public abstract class SpoofMultiAggregate extends SpoofOperator implements Seria\nif( k <= 1 ) //SINGLE-THREADED\n{\n- setInitialOutputValues(c);\nif( !inputs.get(0).isInSparseFormat() )\nexecuteDense(inputs.get(0).getDenseBlock(), b, scalars, c, m, n, 0, m);\nelse\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix codegen multi-aggregate min/max result initialization
This patch fixes an issue with min/max result initialization in
the case of multi-threaded execution, which was not caught by the
test suite before because multi-threading is only applied to inputs w/
>1M cells. |
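The reason the initialization must precede the multi-threaded branch: each worker merges its partial result into the shared output cells, so min/max cells have to start from the aggregation identity (+Inf/-Inf) rather than 0. A small self-contained example of the failure mode (values are made up):

    // Merging per-thread partial minima into an uninitialized (0) cell
    // silently returns 0 for all-positive inputs; the identity for min
    // is +Infinity (symmetrically -Infinity for max).
    public class MinInitSketch {
        public static void main(String[] args) {
            double[] partials = {4.2, 3.7, 5.1}; // per-thread partial minima
            double wrong = 0;                    // missing initialization
            double right = Double.POSITIVE_INFINITY;
            for (double p : partials) {
                wrong = Math.min(wrong, p);
                right = Math.min(right, p);
            }
            System.out.println(wrong + " vs " + right); // prints: 0.0 vs 3.7
        }
    }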
49,768 | 13.04.2017 15:15:38 | 25,200 | fe6d887420143277aa8930cbea6d43a460ae7789 | [maven-release-plugin] prepare release v0.14.0-incubating-rc3 | [
{
"change_type": "MODIFY",
"old_path": "pom.xml",
"new_path": "pom.xml",
"diff": "<version>18</version>\n</parent>\n<groupId>org.apache.systemml</groupId>\n- <version>0.14.0-incubating-SNAPSHOT</version>\n+ <version>0.14.0-incubating</version>\n<artifactId>systemml</artifactId>\n<packaging>jar</packaging>\n<name>SystemML</name>\n<connection>scm:git:git@github.com:apache/incubator-systemml</connection>\n<developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-systemml</developerConnection>\n<url>https://git-wip-us.apache.org/repos/asf?p=incubator-systemml.git</url>\n- <tag>HEAD</tag>\n+ <tag>v0.14.0-incubating-rc3</tag>\n</scm>\n<issueManagement>\n<system>JIRA</system>\n"
}
] | Java | Apache License 2.0 | apache/systemds | [maven-release-plugin] prepare release v0.14.0-incubating-rc3 |
49,738 | 13.04.2017 14:18:47 | 25,200 | b70ee45330a457e3f0ee61c499306fb2518997c7 | Generalized codegen cell template (sideways row vectors)
This patch generalizes the existing codegen cell template by allowing
sideways row vectors, i.e., matrix-row vector binary operations, in
addition to sideways column vectors and matrices. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java",
"diff": "@@ -41,8 +41,6 @@ import org.apache.sysml.hops.codegen.cplan.CNodeOuterProduct;\nimport org.apache.sysml.hops.codegen.cplan.CNodeTernary;\nimport org.apache.sysml.hops.codegen.cplan.CNodeTernary.TernaryType;\nimport org.apache.sysml.hops.codegen.cplan.CNodeTpl;\n-import org.apache.sysml.hops.codegen.cplan.CNodeUnary;\n-import org.apache.sysml.hops.codegen.cplan.CNodeUnary.UnaryType;\nimport org.apache.sysml.hops.codegen.template.TemplateBase;\nimport org.apache.sysml.hops.codegen.template.TemplateBase.CloseType;\nimport org.apache.sysml.hops.codegen.template.TemplateBase.TemplateType;\n@@ -641,9 +639,7 @@ public class SpoofCompiler\nprivate static void rFindAndRemoveLookup(CNode node, CNodeData mainInput) {\nfor( int i=0; i<node.getInput().size(); i++ ) {\nCNode tmp = node.getInput().get(i);\n- if( tmp instanceof CNodeUnary && (((CNodeUnary)tmp).getType()==UnaryType.LOOKUP_R\n- || ((CNodeUnary)tmp).getType()==UnaryType.LOOKUP_RC)\n- && tmp.getInput().get(0) instanceof CNodeData\n+ if( TemplateUtils.isLookup(tmp) && tmp.getInput().get(0) instanceof CNodeData\n&& ((CNodeData)tmp.getInput().get(0)).getHopID()==mainInput.getHopID() )\n{\nnode.getInput().set(i, tmp.getInput().get(0));\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeUnary.java",
"diff": "@@ -28,7 +28,7 @@ import org.apache.sysml.parser.Expression.DataType;\npublic class CNodeUnary extends CNode\n{\npublic enum UnaryType {\n- LOOKUP_R, LOOKUP_RC, LOOKUP0, //codegen specific\n+ LOOKUP_R, LOOKUP_C, LOOKUP_RC, LOOKUP0, //codegen specific\nROW_SUMS, ROW_MINS, ROW_MAXS, //codegen specific\nVECT_EXP, VECT_POW2, VECT_MULT2, VECT_SQRT, VECT_LOG,\nVECT_ABS, VECT_ROUND, VECT_CEIL, VECT_FLOOR, VECT_SIGN,\n@@ -73,6 +73,8 @@ public class CNodeUnary extends CNode\nreturn \" double %TMP% = FastMath.exp(%IN1%);\\n\";\ncase LOOKUP_R:\nreturn \" double %TMP% = getValue(%IN1%, rowIndex);\\n\";\n+ case LOOKUP_C:\n+ return \" double %TMP% = getValue(%IN1%, colIndex);\\n\";\ncase LOOKUP_RC:\nreturn \" double %TMP% = getValue(%IN1%, rowIndex*n+colIndex);\\n\";\ncase LOOKUP0:\n@@ -207,6 +209,7 @@ public class CNodeUnary extends CNode\ncase VECT_FLOOR:\ncase VECT_SIGN: return \"u(v\"+_type.name().toLowerCase()+\")\";\ncase LOOKUP_R: return \"u(ixr)\";\n+ case LOOKUP_C: return \"u(ixc)\";\ncase LOOKUP_RC: return \"u(ixrc)\";\ncase LOOKUP0: return \"u(ix0)\";\ncase POW2: return \"^2\";\n@@ -237,6 +240,7 @@ public class CNodeUnary extends CNode\ncase ROW_MAXS:\ncase EXP:\ncase LOOKUP_R:\n+ case LOOKUP_C:\ncase LOOKUP_RC:\ncase LOOKUP0:\ncase POW2:\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateCell.java",
"diff": "@@ -165,10 +165,7 @@ public class TemplateCell extends TemplateBase\nif(hop instanceof UnaryOp)\n{\nCNode cdata1 = tmp.get(hop.getInput().get(0).getHopID());\n- if( TemplateUtils.isColVector(cdata1) )\n- cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_R);\n- else if( cdata1 instanceof CNodeData && hop.getInput().get(0).getDataType().isMatrix() )\n- cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_RC);\n+ cdata1 = TemplateUtils.wrapLookupIfNecessary(cdata1, hop.getInput().get(0));\nString primitiveOpName = ((UnaryOp)hop).getOp().name();\nout = new CNodeUnary(cdata1, UnaryType.valueOf(primitiveOpName));\n@@ -180,17 +177,9 @@ public class TemplateCell extends TemplateBase\nCNode cdata2 = tmp.get(hop.getInput().get(1).getHopID());\nString primitiveOpName = bop.getOp().name();\n- //cdata1 is vector\n- if( TemplateUtils.isColVector(cdata1) )\n- cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_R);\n- else if( cdata1 instanceof CNodeData && hop.getInput().get(0).getDataType().isMatrix() )\n- cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_RC);\n-\n- //cdata2 is vector\n- if( TemplateUtils.isColVector(cdata2) )\n- cdata2 = new CNodeUnary(cdata2, UnaryType.LOOKUP_R);\n- else if( cdata2 instanceof CNodeData && hop.getInput().get(1).getDataType().isMatrix() )\n- cdata2 = new CNodeUnary(cdata2, UnaryType.LOOKUP_RC);\n+ //add lookups if required\n+ cdata1 = TemplateUtils.wrapLookupIfNecessary(cdata1, hop.getInput().get(0));\n+ cdata2 = TemplateUtils.wrapLookupIfNecessary(cdata2, hop.getInput().get(1));\nif( bop.getOp()==OpOp2.POW && cdata2.isLiteral() && cdata2.getVarname().equals(\"2\") )\nout = new CNodeUnary(cdata1, UnaryType.POW2);\n@@ -206,17 +195,9 @@ public class TemplateCell extends TemplateBase\nCNode cdata2 = tmp.get(hop.getInput().get(1).getHopID());\nCNode cdata3 = tmp.get(hop.getInput().get(2).getHopID());\n- //cdata1 is vector\n- if( TemplateUtils.isColVector(cdata1) )\n- cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_R);\n- else if( cdata1 instanceof CNodeData && hop.getInput().get(0).getDataType().isMatrix() )\n- cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_RC);\n-\n- //cdata3 is vector\n- if( TemplateUtils.isColVector(cdata3) )\n- cdata3 = new CNodeUnary(cdata3, UnaryType.LOOKUP_R);\n- else if( cdata3 instanceof CNodeData && hop.getInput().get(2).getDataType().isMatrix() )\n- cdata3 = new CNodeUnary(cdata3, UnaryType.LOOKUP_RC);\n+ //add lookups if required\n+ cdata1 = TemplateUtils.wrapLookupIfNecessary(cdata1, hop.getInput().get(0));\n+ cdata3 = TemplateUtils.wrapLookupIfNecessary(cdata3, hop.getInput().get(2));\n//construct ternary cnode, primitive operation derived from OpOp3\nout = new CNodeTernary(cdata1, cdata2, cdata3,\n@@ -225,10 +206,7 @@ public class TemplateCell extends TemplateBase\nelse if( hop instanceof ParameterizedBuiltinOp )\n{\nCNode cdata1 = tmp.get(((ParameterizedBuiltinOp)hop).getTargetHop().getHopID());\n- if( TemplateUtils.isColVector(cdata1) )\n- cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_R);\n- else if( cdata1 instanceof CNodeData && hop.getInput().get(0).getDataType().isMatrix() )\n- cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_RC);\n+ cdata1 = TemplateUtils.wrapLookupIfNecessary(cdata1, hop.getInput().get(0));\nCNode cdata2 = tmp.get(((ParameterizedBuiltinOp)hop).getParameterHop(\"pattern\").getHopID());\nCNode cdata3 = tmp.get(((ParameterizedBuiltinOp)hop).getParameterHop(\"replacement\").getHopID());\n@@ -290,8 +268,8 @@ public class TemplateCell extends TemplateBase\nisBinaryMatrixScalar = (ldt.isScalar() || 
rdt.isScalar());\nisBinaryMatrixVector = hop.dimsKnown()\n- && ((ldt.isMatrix() && TemplateUtils.isVectorOrScalar(right) && !TemplateUtils.isBinaryMatrixRowVector(hop))\n- || (rdt.isMatrix() && TemplateUtils.isVectorOrScalar(left) && !TemplateUtils.isBinaryMatrixRowVector(hop)) );\n+ && ((ldt.isMatrix() && TemplateUtils.isVectorOrScalar(right))\n+ || (rdt.isMatrix() && TemplateUtils.isVectorOrScalar(left)) );\nisBinaryMatrixMatrixDense = hop.dimsKnown() && HopRewriteUtils.isEqualSize(left, right)\n&& ldt.isMatrix() && rdt.isMatrix() && !HopRewriteUtils.isSparse(left) && !HopRewriteUtils.isSparse(right);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java",
"diff": "@@ -227,10 +227,7 @@ public class TemplateRow extends TemplateBase\n}\nelse //general scalar case\n{\n- if( TemplateUtils.isColVector(cdata1) )\n- cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_R);\n- else if( cdata1 instanceof CNodeData && hop.getInput().get(0).getDataType().isMatrix() )\n- cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_RC);\n+ cdata1 = TemplateUtils.wrapLookupIfNecessary(cdata1, hop.getInput().get(0));\nString primitiveOpName = ((UnaryOp)hop).getOp().toString();\nout = new CNodeUnary(cdata1, UnaryType.valueOf(primitiveOpName));\n@@ -271,17 +268,9 @@ public class TemplateRow extends TemplateBase\nCNode cdata2 = tmp.get(hop.getInput().get(1).getHopID());\nCNode cdata3 = tmp.get(hop.getInput().get(2).getHopID());\n- //cdata1 is vector\n- if( TemplateUtils.isColVector(cdata1) )\n- cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_R);\n- else if( cdata1 instanceof CNodeData && hop.getInput().get(0).getDataType().isMatrix() )\n- cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_RC);\n-\n- //cdata3 is vector\n- if( TemplateUtils.isColVector(cdata3) )\n- cdata3 = new CNodeUnary(cdata3, UnaryType.LOOKUP_R);\n- else if( cdata3 instanceof CNodeData && hop.getInput().get(2).getDataType().isMatrix() )\n- cdata3 = new CNodeUnary(cdata3, UnaryType.LOOKUP_RC);\n+ //add lookups if required\n+ cdata1 = TemplateUtils.wrapLookupIfNecessary(cdata1, hop.getInput().get(0));\n+ cdata3 = TemplateUtils.wrapLookupIfNecessary(cdata3, hop.getInput().get(2));\n//construct ternary cnode, primitive operation derived from OpOp3\nout = new CNodeTernary(cdata1, cdata2, cdata3,\n@@ -290,10 +279,7 @@ public class TemplateRow extends TemplateBase\nelse if( hop instanceof ParameterizedBuiltinOp )\n{\nCNode cdata1 = tmp.get(((ParameterizedBuiltinOp)hop).getTargetHop().getHopID());\n- if( TemplateUtils.isColVector(cdata1) )\n- cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_R);\n- else if( cdata1 instanceof CNodeData && hop.getInput().get(0).getDataType().isMatrix() )\n- cdata1 = new CNodeUnary(cdata1, UnaryType.LOOKUP_RC);\n+ cdata1 = TemplateUtils.wrapLookupIfNecessary(cdata1, hop.getInput().get(0));\nCNode cdata2 = tmp.get(((ParameterizedBuiltinOp)hop).getParameterHop(\"pattern\").getHopID());\nCNode cdata3 = tmp.get(((ParameterizedBuiltinOp)hop).getParameterHop(\"replacement\").getHopID());\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java",
"diff": "@@ -74,6 +74,17 @@ public class TemplateUtils\n&& hop.getNumRows() == 1 && hop.getNumCols() != 1);\n}\n+ public static CNode wrapLookupIfNecessary(CNode node, Hop hop) {\n+ CNode ret = node;\n+ if( isColVector(node) )\n+ ret = new CNodeUnary(node, UnaryType.LOOKUP_R);\n+ else if( isRowVector(node) )\n+ ret = new CNodeUnary(node, UnaryType.LOOKUP_C);\n+ else if( node instanceof CNodeData && hop.getDataType().isMatrix() )\n+ ret = new CNodeUnary(node, UnaryType.LOOKUP_RC);\n+ return ret;\n+ }\n+\npublic static boolean isMatrix(Hop hop) {\nreturn (hop.getDataType() == DataType.MATRIX && hop.getDim1() != 1 && hop.getDim2()!=1);\n}\n@@ -256,9 +267,12 @@ public class TemplateUtils\n}\npublic static boolean isLookup(CNode node) {\n- return (node instanceof CNodeUnary\n- && (((CNodeUnary)node).getType()==UnaryType.LOOKUP_R\n- || ((CNodeUnary)node).getType()==UnaryType.LOOKUP_RC));\n+ return isUnary(node, UnaryType.LOOKUP_R, UnaryType.LOOKUP_C, UnaryType.LOOKUP_RC);\n+ }\n+\n+ public static boolean isUnary(CNode node, UnaryType...types) {\n+ return node instanceof CNodeUnary\n+ && ArrayUtils.contains(types, ((CNodeUnary)node).getType());\n}\npublic static CNodeData createCNodeData(Hop hop, boolean compileLiterals) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CellwiseTmplTest.java",
"diff": "@@ -49,7 +49,7 @@ public class CellwiseTmplTest extends AutomatedTestBase\nprivate static final String TEST_NAME11 = TEST_NAME+11; //replace((0 / (X - 500))+1, 0/0, 7)\nprivate static final String TEST_NAME12 = TEST_NAME+12; //((X/3) %% 0.6) + ((X/3) %/% 0.6)\nprivate static final String TEST_NAME13 = TEST_NAME+13; //min(X + 7 * Y) large\n-\n+ private static final String TEST_NAME14 = TEST_NAME+14; //-2 * X + t(Y); t(Y) is rowvector\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + CellwiseTmplTest.class.getSimpleName() + \"/\";\n@@ -62,7 +62,7 @@ public class CellwiseTmplTest extends AutomatedTestBase\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- for( int i=1; i<=13; i++ ) {\n+ for( int i=1; i<=14; i++ ) {\naddTestConfiguration( TEST_NAME+i, new TestConfiguration(\nTEST_CLASS_DIR, TEST_NAME+i, new String[] {String.valueOf(i)}) );\n}\n@@ -134,6 +134,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME13, true, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenCellwiseRewrite14() {\n+ testCodegenIntegration( TEST_NAME14, true, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenCellwise1() {\ntestCodegenIntegration( TEST_NAME1, false, ExecType.CP );\n@@ -200,6 +205,11 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME13, false, ExecType.CP );\n}\n+ @Test\n+ public void testCodegenCellwise14() {\n+ testCodegenIntegration( TEST_NAME14, false, ExecType.CP );\n+ }\n+\n@Test\npublic void testCodegenCellwiseRewrite1_sp() {\ntestCodegenIntegration( TEST_NAME1, true, ExecType.SPARK );\n@@ -235,6 +245,16 @@ public class CellwiseTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME12, true, ExecType.SPARK );\n}\n+ @Test\n+ public void testCodegenCellwiseRewrite13_sp() {\n+ testCodegenIntegration( TEST_NAME13, true, ExecType.SPARK );\n+ }\n+\n+ @Test\n+ public void testCodegenCellwiseRewrite14_sp() {\n+ testCodegenIntegration( TEST_NAME14, true, ExecType.SPARK );\n+ }\n+\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\nboolean oldRewrites = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/cellwisetmpl14.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+\n+X = matrix(seq(7, 2200006), 1100, 2000, byrow=TRUE);\n+Y = seq(1, 2000);\n+\n+R = -2 * X + (matrix(1,nrow(X),1) %*% t(Y));\n+\n+writeMM(as(R,\"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/cellwisetmpl14.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(seq(7, 2200006), 1100, 2000);\n+Y = seq(1, 2000);\n+\n+R = -2 * X + t(Y);\n+\n+write(R, $1)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1515] Generalized codegen cell template (sideways row vectors)
This patch generalizes the existing codegen cell template by allowing
sideways row vectors, i.e., matrix-row vector binary operations, in
addition to sideways column vectors and matrices. |
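Semantically, a sideways row vector means each cell-wise operation reads the side input by column index (cf. the new LOOKUP_C lookup), as exercised by the test pattern -2 * X + t(Y). A minimal Java sketch, assuming dense row-major inputs:

    // Cell-wise op with a broadcast row vector: out[i][j] = -2*X[i][j] + y[j].
    // The side input y is indexed by the column index, mirroring LOOKUP_C.
    public class RowVectorBroadcastSketch {
        public static double[][] apply(double[][] X, double[] y) {
            double[][] out = new double[X.length][];
            for (int i = 0; i < X.length; i++) {
                out[i] = new double[X[i].length];
                for (int j = 0; j < X[i].length; j++)
                    out[i][j] = -2 * X[i][j] + y[j];
            }
            return out;
        }
    }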
49,738 | 14.04.2017 01:04:22 | 25,200 | 55e46e7cbeb46073d5d2f19fc4c3b59bdc01c7cb | Fix corrupted input file names in old/new mlcontext | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/MLContext.java",
"new_path": "src/main/java/org/apache/sysml/api/MLContext.java",
"diff": "@@ -518,7 +518,7 @@ public class MLContext {\nif( format.equals(\"csv\") ) {\nint blksz = ConfigurationManager.getBlocksize();\nMatrixCharacteristics mc = new MatrixCharacteristics(rlen, clen, blksz, blksz, nnz);\n- mo = new MatrixObject(ValueType.DOUBLE, null, new MatrixFormatMetaData(mc, OutputInfo.CSVOutputInfo, InputInfo.CSVInputInfo));\n+ mo = new MatrixObject(ValueType.DOUBLE, OptimizerUtils.getUniqueTempFileName(), new MatrixFormatMetaData(mc, OutputInfo.CSVOutputInfo, InputInfo.CSVInputInfo));\n}\nelse if( format.equals(\"text\") ) {\nif(rlen == -1 || clen == -1) {\n@@ -526,7 +526,7 @@ public class MLContext {\n}\nint blksz = ConfigurationManager.getBlocksize();\nMatrixCharacteristics mc = new MatrixCharacteristics(rlen, clen, blksz, blksz, nnz);\n- mo = new MatrixObject(ValueType.DOUBLE, null, new MatrixFormatMetaData(mc, OutputInfo.TextCellOutputInfo, InputInfo.TextCellInputInfo));\n+ mo = new MatrixObject(ValueType.DOUBLE, OptimizerUtils.getUniqueTempFileName(), new MatrixFormatMetaData(mc, OutputInfo.TextCellOutputInfo, InputInfo.TextCellInputInfo));\n}\nelse if( format.equals(\"mm\") ) {\n// TODO: Handle matrix market\n@@ -588,7 +588,7 @@ public class MLContext {\nif(rlen == -1 || clen == -1) {\nthrow new DMLRuntimeException(\"The metadata is required in registerInput for format:\" + format);\n}\n- fo = new FrameObject(null, new MatrixFormatMetaData(mc, OutputInfo.TextCellOutputInfo, InputInfo.TextCellInputInfo));\n+ fo = new FrameObject(OptimizerUtils.getUniqueTempFileName(), new MatrixFormatMetaData(mc, OutputInfo.TextCellOutputInfo, InputInfo.TextCellInputInfo));\n}\nelse {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java",
"new_path": "src/main/java/org/apache/sysml/hops/OptimizerUtils.java",
"diff": "@@ -41,6 +41,7 @@ import org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\n+import org.apache.sysml.runtime.controlprogram.parfor.ProgramConverter;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\nimport org.apache.sysml.runtime.functionobjects.IntegerDivide;\nimport org.apache.sysml.runtime.functionobjects.Modulus;\n@@ -885,7 +886,10 @@ public class OptimizerUtils\n* @return unique temp file name\n*/\npublic static String getUniqueTempFileName() {\n- return new Dag<Lop>().getNextUniqueFilename();\n+ return ConfigurationManager.getScratchSpace()\n+ + Lop.FILE_SEPARATOR + Lop.PROCESS_PREFIX + DMLScript.getUUID()\n+ + Lop.FILE_SEPARATOR + ProgramConverter.CP_ROOT_THREAD_ID + Lop.FILE_SEPARATOR\n+ + Dag.getNextUniqueFilenameSuffix();\n}\npublic static boolean allowsToFilterEmptyBlockOutputs( Hop hop )\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/lops/compile/Dag.java",
"new_path": "src/main/java/org/apache/sysml/lops/compile/Dag.java",
"diff": "@@ -205,8 +205,12 @@ public class Dag<N extends Lop>\nreturn scratchFilePath;\n}\n+ public static String getNextUniqueFilenameSuffix() {\n+ return \"temp\" + job_id.getNextID();\n+ }\n+\npublic String getNextUniqueFilename() {\n- return getFilePath() + \"temp\" + job_id.getNextID();\n+ return getFilePath() + getNextUniqueFilenameSuffix();\n}\npublic static String getNextUniqueVarname(DataType dt) {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1518] Fix corrupted input file names in old/new mlcontext |
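Before this fix, matrices and frames registered through the old MLContext were created with a null file name, which broke any code path that later needed a valid file name for spilling or export. The fix composes a unique temporary file name instead. A minimal sketch of the composition, assuming '/' separators and illustrative prefix constants (the real code pulls the scratch space from ConfigurationManager, the process prefix and UUID from Lop/DMLScript, and the suffix counter from Dag):

import java.util.concurrent.atomic.AtomicLong;

public class TempFileNameSketch {
    private static final AtomicLong SEQ = new AtomicLong(0);
    // scratch/<processPrefix><uuid>/<rootThreadId>/temp<n>
    public static String uniqueTempFileName(String scratch, String uuid) {
        return scratch + "/_p" + uuid + "/_t0/temp" + SEQ.incrementAndGet();
    }
    public static void main(String[] args) {
        // e.g. /tmp/systemml/_p1234/_t0/temp1 (never null, unique per call)
        System.out.println(uniqueTempFileName("/tmp/systemml", "1234"));
    }
}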
49,738 | 14.04.2017 02:24:13 | 25,200 | 149562eca0bb1a438e34dc6e2d7993b44e2e612e | Fix sparse output corruption of row-wise rexpand | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java",
"diff": "@@ -1899,9 +1899,13 @@ public class LibMatrixReorg\nif( val == Math.floor(val) && val >= 1 && val <= max )\nret.appendValue((int)(val-1), i+tmpi[j], 1);\n}\n-\n}\n+ //ensure valid output sparse representation\n+ //(necessary due to cache-conscious processing w/ unstable sort)\n+ if( ret.isInSparseFormat() )\n+ ret.sortSparseRows();\n+\nreturn ret;\n}\n@@ -1940,7 +1944,7 @@ public class LibMatrixReorg\n{\n//copy value array from input matrix\nif( in.isEmptyBlock(false) ) {\n- Arrays.fill(tmp, 0, 0, len);\n+ Arrays.fill(tmp, 0, len, 0);\n}\nelse if( in.sparse ){ //SPARSE\nfor( int i=0; i<len; i++ )\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1520] Fix sparse output corruption of row-wise rexpand |
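Two independent defects are fixed here. First, the cache-conscious row-wise rexpand appends column indexes out of order, so the result needs a final sortSparseRows() to restore a valid sparse representation. Second, a swapped-argument call to Arrays.fill silently skipped zeroing a reused temp buffer. A small self-contained illustration of that second pitfall:

import java.util.Arrays;

public class FillArgOrderSketch {
    public static void main(String[] args) {
        double[] tmp = {7, 7, 7, 7};
        int len = 3;
        // Arrays.fill(double[] a, int from, int to, double val): the old
        // call passed (0, 0, len), i.e. the empty range [0,0) with fill
        // value 3.0, a no-op that leaves stale data behind.
        Arrays.fill(tmp, 0, 0, len);
        System.out.println(Arrays.toString(tmp)); // [7.0, 7.0, 7.0, 7.0]
        Arrays.fill(tmp, 0, len, 0); // fixed: zeros the range [0,len)
        System.out.println(Arrays.toString(tmp)); // [0.0, 0.0, 0.0, 7.0]
    }
}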
49,738 | 14.04.2017 20:57:51 | 25,200 | 1cd62866b7848bdb5eeded39259cd23b0ff172c8 | [SYSTEMML-1525,1526] Fix codegen size propagation and cplan cleanup
This patch fixes the output size propagation of row and multi-aggregate
templates as well as plan cleanups of multi-aggregate templates where
the output nodes themselves can be indexing operations. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofCompiler.java",
"diff": "@@ -583,21 +583,10 @@ public class SpoofCompiler\ntmp.toArray(new Hop[0]),tpl));\n}\n- //remove spurious lookups on main input of cell template\n- if( tpl instanceof CNodeCell || tpl instanceof CNodeOuterProduct ) {\n- CNodeData in1 = (CNodeData)tpl.getInput().get(0);\n- rFindAndRemoveLookup(tpl.getOutput(), in1);\n- }\n- else if( tpl instanceof CNodeMultiAgg ) {\n- CNodeData in1 = (CNodeData)tpl.getInput().get(0);\n- for( CNode output : ((CNodeMultiAgg)tpl).getOutputs() )\n- rFindAndRemoveLookup(output, in1);\n- }\n-\n//remove invalid plans with column indexing on main input\nif( tpl instanceof CNodeCell ) {\nCNodeData in1 = (CNodeData)tpl.getInput().get(0);\n- if( rHasLookupRC1(tpl.getOutput(), in1) ) {\n+ if( rHasLookupRC1(tpl.getOutput(), in1) || isLookupRC1(tpl.getOutput(), in1) ) {\ncplans2.remove(e.getKey());\nif( LOG.isTraceEnabled() )\nLOG.trace(\"Removed cplan due to invalid rc1 indexing on main input.\");\n@@ -606,16 +595,26 @@ public class SpoofCompiler\nelse if( tpl instanceof CNodeMultiAgg ) {\nCNodeData in1 = (CNodeData)tpl.getInput().get(0);\nfor( CNode output : ((CNodeMultiAgg)tpl).getOutputs() )\n- if( rHasLookupRC1(output, in1) ) {\n+ if( rHasLookupRC1(output, in1) || isLookupRC1(output, in1) ) {\ncplans2.remove(e.getKey());\nif( LOG.isTraceEnabled() )\nLOG.trace(\"Removed cplan due to invalid rc1 indexing on main input.\");\n}\n}\n+ //remove spurious lookups on main input of cell template\n+ if( tpl instanceof CNodeCell || tpl instanceof CNodeOuterProduct ) {\n+ CNodeData in1 = (CNodeData)tpl.getInput().get(0);\n+ rFindAndRemoveLookup(tpl.getOutput(), in1);\n+ }\n+ else if( tpl instanceof CNodeMultiAgg ) {\n+ CNodeData in1 = (CNodeData)tpl.getInput().get(0);\n+ rFindAndRemoveLookupMultiAgg((CNodeMultiAgg)tpl, in1);\n+ }\n+\n//remove cplan w/ single op and w/o agg\n- if( tpl instanceof CNodeCell && ((CNodeCell)tpl).getCellType()==CellType.NO_AGG\n- && TemplateUtils.hasSingleOperation(tpl) )\n+ if( tpl instanceof CNodeCell && ((((CNodeCell)tpl).getCellType()==CellType.NO_AGG\n+ && TemplateUtils.hasSingleOperation(tpl))|| TemplateUtils.hasNoOperation(tpl)) )\ncplans2.remove(e.getKey());\n//remove cplan if empty\n@@ -636,6 +635,20 @@ public class SpoofCompiler\nrCollectLeafIDs(c, leafs);\n}\n+ private static void rFindAndRemoveLookupMultiAgg(CNodeMultiAgg node, CNodeData mainInput) {\n+ //process all outputs individually\n+ for( CNode output : node.getOutputs() )\n+ rFindAndRemoveLookup(output, mainInput);\n+\n+ //handle special case, of lookup being itself the output node\n+ for( int i=0; i < node.getOutputs().size(); i++) {\n+ CNode tmp = node.getOutputs().get(i);\n+ if( TemplateUtils.isLookup(tmp) && tmp.getInput().get(0) instanceof CNodeData\n+ && ((CNodeData)tmp.getInput().get(0)).getHopID()==mainInput.getHopID() )\n+ node.getOutputs().set(i, tmp.getInput().get(0));\n+ }\n+ }\n+\nprivate static void rFindAndRemoveLookup(CNode node, CNodeData mainInput) {\nfor( int i=0; i<node.getInput().size(); i++ ) {\nCNode tmp = node.getInput().get(i);\n@@ -653,9 +666,7 @@ public class SpoofCompiler\nboolean ret = false;\nfor( int i=0; i<node.getInput().size() && !ret; i++ ) {\nCNode tmp = node.getInput().get(i);\n- if( tmp instanceof CNodeTernary && ((CNodeTernary)tmp).getType()==TernaryType.LOOKUP_RC1\n- && tmp.getInput().get(0) instanceof CNodeData\n- && ((CNodeData)tmp.getInput().get(0)).getHopID() == mainInput.getHopID())\n+ if( isLookupRC1(tmp, mainInput) )\nret = true;\nelse\nret |= rHasLookupRC1(tmp, mainInput);\n@@ -663,6 +674,12 @@ public class 
SpoofCompiler\nreturn ret;\n}\n+ private static boolean isLookupRC1(CNode node, CNodeData mainInput) {\n+ return (node instanceof CNodeTernary && ((CNodeTernary)node).getType()==TernaryType.LOOKUP_RC1\n+ && node.getInput().get(0) instanceof CNodeData\n+ && ((CNodeData)node.getInput().get(0)).getHopID() == mainInput.getHopID());\n+ }\n+\n/**\n* This plan cache maps CPlans to compiled and loaded classes in order\n* to reduce javac and JIT compilation overhead. It uses a simple LRU\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofFusedOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/SpoofFusedOp.java",
"diff": "@@ -41,6 +41,7 @@ public class SpoofFusedOp extends Hop implements MultiThreadedHop\nCOLUMN_DIMS_ROWS,\nCOLUMN_DIMS_COLS,\nSCALAR,\n+ MULTI_SCALAR,\nROW_RANK_DIMS, // right wdivmm\nCOLUMN_RANK_DIMS // left wdivmm\n}\n@@ -160,6 +161,10 @@ public class SpoofFusedOp extends Hop implements MultiThreadedHop\nsetDim1(0);\nsetDim2(0);\nbreak;\n+ case MULTI_SCALAR:\n+ setDim1(1); //row vector\n+ //dim2 statically set from outside\n+ break;\ncase ROW_RANK_DIMS:\nsetDim1(getInput().get(0).getDim1());\nsetDim2(getInput().get(1).getDim2());\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeMultiAgg.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeMultiAgg.java",
"diff": "@@ -137,7 +137,7 @@ public class CNodeMultiAgg extends CNodeTpl\n@Override\npublic SpoofOutputDimsType getOutputDimType() {\n- return SpoofOutputDimsType.COLUMN_DIMS_COLS; //row vector\n+ return SpoofOutputDimsType.MULTI_SCALAR;\n}\n@Override\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeRow.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/cplan/CNodeRow.java",
"diff": "@@ -123,9 +123,14 @@ public class CNodeRow extends CNodeTpl\n@Override\npublic SpoofOutputDimsType getOutputDimType() {\n- return (_output._cols==1) ?\n- SpoofOutputDimsType.COLUMN_DIMS_ROWS : //column vector\n- SpoofOutputDimsType.COLUMN_DIMS_COLS; //row vector\n+ switch( _type ) {\n+ case NO_AGG: return SpoofOutputDimsType.INPUT_DIMS;\n+ case ROW_AGG: return SpoofOutputDimsType.ROW_DIMS;\n+ case COL_AGG: return SpoofOutputDimsType.COLUMN_DIMS_COLS; //row vector\n+ case COL_AGG_T: return SpoofOutputDimsType.COLUMN_DIMS_ROWS; //column vector\n+ default:\n+ throw new RuntimeException(\"Unsupported row type: \"+_type.toString());\n+ }\n}\n@Override\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateUtils.java",
"diff": "@@ -267,7 +267,8 @@ public class TemplateUtils\n}\npublic static boolean isLookup(CNode node) {\n- return isUnary(node, UnaryType.LOOKUP_R, UnaryType.LOOKUP_C, UnaryType.LOOKUP_RC);\n+ return isUnary(node, UnaryType.LOOKUP_R, UnaryType.LOOKUP_C, UnaryType.LOOKUP_RC)\n+ || isTernary(node, TernaryType.LOOKUP_RC1);\n}\npublic static boolean isUnary(CNode node, UnaryType...types) {\n@@ -275,6 +276,11 @@ public class TemplateUtils\n&& ArrayUtils.contains(types, ((CNodeUnary)node).getType());\n}\n+ public static boolean isTernary(CNode node, TernaryType...types) {\n+ return node instanceof CNodeTernary\n+ && ArrayUtils.contains(types, ((CNodeTernary)node).getType());\n+ }\n+\npublic static CNodeData createCNodeData(Hop hop, boolean compileLiterals) {\nCNodeData cdata = new CNodeData(hop);\ncdata.setLiteral(hop instanceof LiteralOp && (compileLiterals\n@@ -312,6 +318,11 @@ public class TemplateUtils\n|| output instanceof CNodeTernary) && hasOnlyDataNodeOrLookupInputs(output);\n}\n+ public static boolean hasNoOperation(CNodeTpl tpl) {\n+ return tpl.getOutput() instanceof CNodeData\n+ || isLookup(tpl.getOutput());\n+ }\n+\npublic static boolean hasOnlyDataNodeOrLookupInputs(CNode node) {\nboolean ret = true;\nfor( CNode c : node.getInput() )\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1525,1526] Fix codegen size propagation and cplan cleanup
This patch fixes the output size propagation of row and multi-aggregate
templates as well as plan cleanups of multi-aggregate templates where
the output nodes themselves can be indexing operations. |
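The central size fix is the per-row-type mapping in CNodeRow, plus the new MULTI_SCALAR type for multi-aggregates (a 1 x #aggregates row vector whose column count is set statically). The mapping, restated as a minimal sketch for an m x n input with illustrative names rather than the actual Hop/SpoofFusedOp API:

public class RowTemplateDimsSketch {
    public static long[] outputDims(String rowType, long m, long n) {
        switch (rowType) {
            case "NO_AGG":    return new long[]{m, n}; // input dims
            case "ROW_AGG":   return new long[]{m, 1}; // one value per row
            case "COL_AGG":   return new long[]{1, n}; // row vector
            case "COL_AGG_T": return new long[]{n, 1}; // column vector
            default: throw new IllegalArgumentException(rowType);
        }
    }
}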
49,738 | 16.04.2017 15:54:36 | 25,200 | e1ae4a9f93cad3f513ce3de8597cec29b7263205 | Performance compressed tsmm (ddc decomp, alloc, result) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupDDC1.java",
"new_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupDDC1.java",
"diff": "@@ -168,6 +168,16 @@ public class ColGroupDDC1 extends ColGroupDDC\n//note: append ok because final sort per row\n}\n+ @Override\n+ public void decompressToBlock(MatrixBlock target, int colpos) {\n+ int nrow = getNumRows();\n+ int ncol = getNumCols();\n+ double[] c = target.getDenseBlock();\n+ for( int i = 0; i < nrow; i++ )\n+ c[i] = _values[(_data[i]&0xFF)*ncol+colpos];\n+ target.recomputeNonZeros();\n+ }\n+\n@Override\nprotected void countNonZerosPerRow(int[] rnnz, int rl, int ru) {\nfinal int ncol = getNumCols();\n@@ -243,8 +253,6 @@ public class ColGroupDDC1 extends ColGroupDDC\nfinal int ncol = getNumCols();\nfinal int numVals = getNumValues();\n- if( 8*numVals < getNumRows() )\n- {\n//iterative over codes and pre-aggregate inputs per code (guaranteed <=255)\n//temporary array also avoids false sharing in multi-threaded environments\ndouble[] vals = allocDVector(numVals, true);\n@@ -261,21 +269,6 @@ public class ColGroupDDC1 extends ColGroupDDC\n}\n}\n}\n- else //general case\n- {\n- //iterate over codes, compute all, and add to the result\n- for( int i=0; i<nrow; i++ ) {\n- double aval = a[i];\n- if( aval != 0 ) {\n- int valOff = (_data[i]&0xFF) * ncol;\n- for( int j=0; j<ncol; j++ ) {\n- int colIx = _colIndexes[j];\n- c[colIx] += aval * _values[valOff+j];\n- }\n- }\n- }\n- }\n- }\n@Override\nprotected void computeSum(MatrixBlock result, KahanFunction kplus) {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupDDC2.java",
"new_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupDDC2.java",
"diff": "@@ -170,6 +170,16 @@ public class ColGroupDDC2 extends ColGroupDDC\n//note: append ok because final sort per row\n}\n+ @Override\n+ public void decompressToBlock(MatrixBlock target, int colpos) {\n+ int nrow = getNumRows();\n+ int ncol = getNumCols();\n+ double[] c = target.getDenseBlock();\n+ for( int i = 0; i < nrow; i++ )\n+ c[i] = _values[_data[i]*ncol+colpos];\n+ target.recomputeNonZeros();\n+ }\n+\n@Override\nprotected void countNonZerosPerRow(int[] rnnz, int rl, int ru) {\nfinal int ncol = getNumCols();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupOLE.java",
"new_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupOLE.java",
"diff": "@@ -200,8 +200,6 @@ public class ColGroupOLE extends ColGroupOffset\n@Override\npublic void decompressToBlock(MatrixBlock target, int colpos)\n- {\n- if( LOW_LEVEL_OPT && getNumValues() > 1 )\n{\nfinal int blksz = BitmapEncoder.BITMAP_BLOCK_SZ;\nfinal int numCols = getNumCols();\n@@ -210,7 +208,7 @@ public class ColGroupOLE extends ColGroupOffset\ndouble[] c = target.getDenseBlock();\n//cache blocking config and position array\n- int[] apos = new int[numVals];\n+ int[] apos = allocIVector(numVals, true);\n//cache conscious append via horizontal scans\nfor( int bi=0; bi<n; bi+=blksz ) {\n@@ -228,15 +226,8 @@ public class ColGroupOLE extends ColGroupOffset\napos[k] += len + 1;\n}\n}\n-\ntarget.recomputeNonZeros();\n}\n- else\n- {\n- //call generic decompression with decoder\n- super.decompressToBlock(target, colpos);\n- }\n- }\n@Override\npublic ColGroup scalarOperation(ScalarOperator op)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupRLE.java",
"new_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupRLE.java",
"diff": "@@ -191,8 +191,6 @@ public class ColGroupRLE extends ColGroupOffset\n@Override\npublic void decompressToBlock(MatrixBlock target, int colpos)\n- {\n- if( LOW_LEVEL_OPT && getNumValues() > 1 )\n{\nfinal int blksz = 128 * 1024;\nfinal int numCols = getNumCols();\n@@ -201,8 +199,8 @@ public class ColGroupRLE extends ColGroupOffset\ndouble[] c = target.getDenseBlock();\n//position and start offset arrays\n- int[] apos = new int[numVals];\nint[] astart = new int[numVals];\n+ int[] apos = allocIVector(numVals, true);\n//cache conscious append via horizontal scans\nfor( int bi=0; bi<n; bi+=blksz ) {\n@@ -225,15 +223,8 @@ public class ColGroupRLE extends ColGroupOffset\nastart[k] = start;\n}\n}\n-\ntarget.recomputeNonZeros();\n}\n- else\n- {\n- //call generic decompression with decoder\n- super.decompressToBlock(target, colpos);\n- }\n- }\n@Override\npublic void rightMultByVector(MatrixBlock vector, MatrixBlock result, int rl, int ru)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/compress/CompressedMatrixBlock.java",
"new_path": "src/main/java/org/apache/sysml/runtime/compress/CompressedMatrixBlock.java",
"diff": "@@ -102,7 +102,7 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\npublic static final boolean TRANSPOSE_INPUT = true;\npublic static final boolean MATERIALIZE_ZEROS = false;\npublic static final long MIN_PAR_AGG_THRESHOLD = 16*1024*1024; //16MB\n- public static final boolean INVESTIGATE_ESTIMATES = false;\n+ public static boolean INVESTIGATE_ESTIMATES = false;\npublic static boolean ALLOW_DDC_ENCODING = true;\nprivate static final boolean LDEBUG = true; //local debug flag\nprivate static final Level LDEBUG_LEVEL = Level.DEBUG; //DEBUG/TRACE for details\n@@ -912,7 +912,7 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\nBinaryOperator bop = new BinaryOperator(Multiply.getMultiplyFnObject());\nLibMatrixBincell.bincellOpInPlace(tmp, w, bop);\n}\n- leftMultByVectorTranspose(_colGroups, tmp, out, true);\n+ leftMultByVectorTranspose(_colGroups, tmp, out, true, true);\n//System.out.println(\"Compressed MMChain in \"+time.stop());\n@@ -1003,7 +1003,7 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\nif( op.getNumThreads()>1 )\nleftMultByVectorTranspose(_colGroups, mb, ret, false, op.getNumThreads());\nelse\n- leftMultByVectorTranspose(_colGroups, mb, ret, false);\n+ leftMultByVectorTranspose(_colGroups, mb, ret, false, true);\n}\nelse {\n//NOTE: we could decompress and invoke super.aggregateBinary but for now\n@@ -1221,6 +1221,7 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\nleftMultByTransposeSelf(_colGroups, out, 0, _colGroups.size());\n// post-processing\n+ LinearAlgebraUtils.copyUpperToLowerTriangle(out);\nout.recomputeNonZeros();\n}\n@@ -1278,6 +1279,7 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\n}\n// post-processing\n+ LinearAlgebraUtils.copyUpperToLowerTriangle(out);\nout.recomputeNonZeros();\n}\n@@ -1402,7 +1404,7 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\n* @param doTranspose if true, transpose vector\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\n- private static void leftMultByVectorTranspose(List<ColGroup> colGroups, MatrixBlock vector, MatrixBlock result, boolean doTranspose)\n+ private static void leftMultByVectorTranspose(List<ColGroup> colGroups, MatrixBlock vector, MatrixBlock result, boolean doTranspose, boolean allocTmp)\nthrows DMLRuntimeException\n{\n//transpose vector if required\n@@ -1417,6 +1419,7 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\nresult.allocateDenseBlock();\n// setup memory pool for reuse\n+ if( allocTmp )\nColGroupValue.setupThreadLocalMemory(getMaxNumValues(colGroups));\n// delegate matrix-vector operation to each column group\n@@ -1425,6 +1428,7 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\n}\n// post-processing\n+ if( allocTmp )\nColGroupValue.cleanupThreadLocalMemory();\nresult.recomputeNonZeros();\n}\n@@ -1488,9 +1492,14 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\nfinal int numRows = groups.get(0).getNumRows();\nfinal int numGroups = groups.size();\n- //preallocated dense matrix block\n- MatrixBlock lhs = new MatrixBlock(numRows, 1, false);\n+ //preallocated dense tmp matrix blocks\n+ MatrixBlock lhs = new MatrixBlock(1, numRows, false);\n+ MatrixBlock tmpret = new MatrixBlock(1, result.getNumColumns(), false);\nlhs.allocateDenseBlock();\n+ tmpret.allocateDenseBlock();\n+\n+ // setup 
memory pool for reuse\n+ ColGroupValue.setupThreadLocalMemory(getMaxNumValues(groups));\n//approach: for each colgroup, extract uncompressed columns one at-a-time\n//vector-matrix multiplies against remaining col groups\n@@ -1504,19 +1513,22 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\n//for all uncompressed lhs columns vectors\nfor( int j=0; j<ixgroup.length; j++ ) {\n//decompress single column\n- lhs.reset(numRows, 1, false);\n+ if( !(group instanceof ColGroupDDC) )\n+ lhs.reset(1, numRows, false);\ngroup.decompressToBlock(lhs, j);\nif( !lhs.isEmptyBlock(false) ) {\n//compute vector-matrix partial result\n- MatrixBlock tmpret = new MatrixBlock(1,result.getNumColumns(),false);\n- leftMultByVectorTranspose(tmpList, lhs, tmpret, true);\n+ leftMultByVectorTranspose(tmpList, lhs, tmpret, false, false);\n//write partial results (disjoint non-zeros)\n- LinearAlgebraUtils.copyNonZerosToRowCol(result, tmpret, ixgroup[j]);\n+ LinearAlgebraUtils.copyNonZerosToUpperTriangle(result, tmpret, ixgroup[j]);\n}\n}\n}\n+\n+ //post processing\n+ ColGroupValue.cleanupThreadLocalMemory();\n}\n@SuppressWarnings(\"unchecked\")\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/compress/utils/LinearAlgebraUtils.java",
"new_path": "src/main/java/org/apache/sysml/runtime/compress/utils/LinearAlgebraUtils.java",
"diff": "@@ -147,25 +147,16 @@ public class LinearAlgebraUtils\nreturn val;\n}\n- public static void copyUpperToLowerTriangle( MatrixBlock ret )\n- {\n- double[] c = ret.getDenseBlock();\n- final int m = ret.getNumRows();\n- final int n = ret.getNumColumns();\n-\n- //copy symmetric values\n- for( int i=0, uix=0; i<m; i++, uix+=n )\n- for( int j=i+1, lix=j*n+i; j<n; j++, lix+=n )\n- c[ lix ] = c[ uix+j ];\n+ public static void copyUpperToLowerTriangle( MatrixBlock ret ) {\n+ LibMatrixMult.copyUpperToLowerTriangle(ret);\n}\n- public static void copyNonZerosToRowCol( MatrixBlock ret, MatrixBlock tmp, int ix )\n- {\n+ public static void copyNonZerosToUpperTriangle( MatrixBlock ret, MatrixBlock tmp, int ix ) {\n+ double[] a = tmp.getDenseBlock();\nfor(int i=0; i<tmp.getNumColumns(); i++) {\n- double val = tmp.quickGetValue(0, i);\n- if( val != 0 ) {\n- ret.setValueDenseUnsafe(ix, i, val);\n- ret.setValueDenseUnsafe(i, ix, val);\n+ if( a[i] != 0 ) {\n+ ret.setValueDenseUnsafe(\n+ (ix<i)?ix:i, (ix<i)?i:ix, a[i]);\n}\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixMult.java",
"diff": "@@ -3414,7 +3414,7 @@ public class LibMatrixMult\n*\n* @param ret matrix\n*/\n- private static void copyUpperToLowerTriangle( MatrixBlock ret )\n+ public static void copyUpperToLowerTriangle( MatrixBlock ret )\n{\ndouble[] c = ret.denseBlock;\nfinal int m = ret.rlen;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1530] Performance compressed tsmm (ddc decomp, alloc, result) |
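The tsmm rework writes each partial vector-matrix result only into the upper triangle and mirrors it to the lower triangle once at the end, instead of performing symmetric writes per nonzero. A dense row-major sketch of the two helpers, simplified from the MatrixBlock-based originals:

public class UpperTriangleSketch {
    // place tmp[i] at (min(ix,i), max(ix,i)) so all writes stay upper-triangular
    static void copyNonZerosToUpperTriangle(double[] C, int n, double[] tmp, int ix) {
        for (int i = 0; i < n; i++)
            if (tmp[i] != 0)
                C[Math.min(ix, i) * n + Math.max(ix, i)] = tmp[i];
    }
    // single final pass restores the symmetric result
    static void copyUpperToLowerTriangle(double[] C, int n) {
        for (int i = 0; i < n; i++)
            for (int j = i + 1; j < n; j++)
                C[j * n + i] = C[i * n + j];
    }
}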
49,738 | 16.04.2017 22:15:56 | 25,200 | cd49f224a02f4d0f03c3b03e36bb8c5cba7edc27 | Performance compressed rowsums (ddc1 groups, alloc) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupDDC1.java",
"new_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupDDC1.java",
"diff": "@@ -302,7 +302,7 @@ public class ColGroupDDC1 extends ColGroupDDC\ndouble[] c = result.getDenseBlock();\n//pre-aggregate nnz per value tuple\n- double[] vals = sumAllValues(kplus, kbuff);\n+ double[] vals = sumAllValues(kplus, kbuff, false);\n//scan data and add to result (use kahan plus not general KahanFunction\n//for correctness in case of sqk+)\n@@ -332,15 +332,25 @@ public class ColGroupDDC1 extends ColGroupDDC\n//iterative over codes of all groups and add to output\n//(use kahan plus not general KahanFunction for correctness in case of sqk+)\nint blksz = 1024; //16KB\n- for( int bi=rl; bi<ru; bi+=blksz )\n- for( int j=0; j<grps.length; j++ )\n+ double[] tmpAgg = new double[blksz];\n+ for( int bi=rl; bi<ru; bi+=blksz ) {\n+ Arrays.fill(tmpAgg, 0);\n+ //aggregate all groups\n+ for( int j=0; j<grps.length; j++ ) {\n+ double[] valsj = vals[j];\n+ byte[] dataj = grps[j]._data;\n+ for( int i=bi; i<Math.min(bi+blksz, ru); i++ )\n+ tmpAgg[i-bi] += valsj[dataj[i]&0xFF];\n+ }\n+ //add partial results of all ddc groups\nfor( int i=bi; i<Math.min(bi+blksz, ru); i++ ) {\nkbuff.set(c[2*i], c[2*i+1]);\n- kplus2.execute2(kbuff, vals[j][grps[j]._data[i]&0xFF]);\n+ kplus2.execute2(kbuff, tmpAgg[i-bi]);\nc[2*i] = kbuff._sum;\nc[2*i+1] = kbuff._correction;\n}\n}\n+ }\n@Override\npublic ColGroup scalarOperation(ScalarOperator op) throws DMLRuntimeException {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupDDC2.java",
"new_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupDDC2.java",
"diff": "@@ -301,7 +301,7 @@ public class ColGroupDDC2 extends ColGroupDDC\ndouble[] c = result.getDenseBlock();\n//pre-aggregate nnz per value tuple\n- double[] vals = sumAllValues(kplus, kbuff);\n+ double[] vals = sumAllValues(kplus, kbuff, false);\n//scan data and add to result (use kahan plus not general KahanFunction\n//for correctness in case of sqk+)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupOLE.java",
"new_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupOLE.java",
"diff": "@@ -472,7 +472,7 @@ public class ColGroupOLE extends ColGroupOffset\n//step 1: prepare position and value arrays\nint[] apos = skipScan(numVals, rl);\n- double[] aval = sumAllValues(kplus, kbuff);\n+ double[] aval = sumAllValues(kplus, kbuff, false);\n//step 2: cache conscious row sums via horizontal scans\nfor( int bi=rl; bi<ru; bi+=blksz2 )\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupRLE.java",
"new_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupRLE.java",
"diff": "@@ -482,7 +482,7 @@ public class ColGroupRLE extends ColGroupOffset\n//current pos / values per RLE list\nint[] astart = new int[numVals];\nint[] apos = skipScan(numVals, rl, astart);\n- double[] aval = sumAllValues(kplus, kbuff);\n+ double[] aval = sumAllValues(kplus, kbuff, false);\n//step 2: cache conscious matrix-vector via horizontal scans\nfor( int bi=rl; bi<ru; bi+=blksz )\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupValue.java",
"new_path": "src/main/java/org/apache/sysml/runtime/compress/ColGroupValue.java",
"diff": "@@ -164,13 +164,18 @@ public abstract class ColGroupValue extends ColGroup\n}\nprotected final double[] sumAllValues(KahanFunction kplus, KahanObject kbuff) {\n+ return sumAllValues(kplus, kbuff, true);\n+ }\n+\n+ protected final double[] sumAllValues(KahanFunction kplus, KahanObject kbuff, boolean allocNew) {\n//quick path: sum\nif( getNumCols()==1 && kplus instanceof KahanPlus )\nreturn _values; //shallow copy of values\n//pre-aggregate value tuple\nfinal int numVals = getNumValues();\n- double[] ret = new double[numVals];\n+ double[] ret = allocNew ? new double[numVals] :\n+ allocDVector(numVals, false);\nfor( int k=0; k<numVals; k++ )\nret[k] = sumValues(k, kplus, kbuff);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1531] Performance compressed rowsums (ddc1 groups, alloc) |
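The multi-group DDC1 row sums now accumulate per 1024-row block into a plain temp array across all groups and apply the compensated addition only once per row. A simplified sketch, with plain double accumulation standing in for the KahanObject machinery:

public class Ddc1RowSumsSketch {
    // data[j][i]: dictionary code of row i in group j; vals[j][k]: sum of tuple k
    public static void rowSums(byte[][] data, double[][] vals, double[] c, int rl, int ru) {
        final int blksz = 1024;
        double[] tmpAgg = new double[blksz];
        for (int bi = rl; bi < ru; bi += blksz) {
            java.util.Arrays.fill(tmpAgg, 0);
            int bimax = Math.min(bi + blksz, ru);
            for (int j = 0; j < data.length; j++)    // aggregate all groups
                for (int i = bi; i < bimax; i++)
                    tmpAgg[i - bi] += vals[j][data[j][i] & 0xFF];
            for (int i = bi; i < bimax; i++)         // one add per row
                c[i] += tmpAgg[i - bi];
        }
    }
}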
49,738 | 19.04.2017 00:12:03 | 25,200 | 4c74a34349bd4eeb0f4e102db7bca1f09b2ced97 | Fix parfor optimizer (result/task partitioning on spark) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerConstrained.java",
"new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerConstrained.java",
"diff": "@@ -131,7 +131,7 @@ public class OptimizerConstrained extends OptimizerRuleBased\nboolean flagRecompMR = rewriteSetExecutionStategy( pn, M0a, M1, M2, M3, flagLIX );\n//exec-type-specific rewrites\n- if( pn.getExecType() == ExecType.MR || pn.getExecType() == ExecType.SPARK )\n+ if( pn.getExecType() == getRemoteExecType() )\n{\nif( M1 > _rm && M3 <= _rm ) {\n// rewrite 1: data partitioning (apply conditional partitioning)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java",
"new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java",
"diff": "@@ -262,7 +262,7 @@ public class OptimizerRuleBased extends Optimizer\nboolean flagRecompMR = rewriteSetExecutionStategy( pn, M0a, M1, M2, M3, flagLIX );\n//exec-type-specific rewrites\n- if( pn.getExecType() == ExecType.MR || pn.getExecType()==ExecType.SPARK )\n+ if( pn.getExecType() == getRemoteExecType() )\n{\nif( M1 > _rm && M3 <= _rm ) {\n// rewrite 1: data partitioning (apply conditional partitioning)\n@@ -400,6 +400,10 @@ public class OptimizerRuleBased extends Optimizer\n_rkmax2 = (int) Math.ceil( PAR_K_FACTOR * _rk2 );\n}\n+ protected ExecType getRemoteExecType() {\n+ return OptimizerUtils.isSparkExecutionMode() ? ExecType.SPARK : ExecType.MR;\n+ }\n+\n///////\n//REWRITE set data partitioner\n///\n@@ -483,7 +487,7 @@ public class OptimizerRuleBased extends Optimizer\n//NOTE: for the moment, we do not partition according to the remote mem, because we can execute\n//it even without partitioning in CP. However, advanced optimizers should reason about this\n//double mold = h.getMemEstimate();\n- if( n.getExecType() == ExecType.MR || n.getExecType()==ExecType.SPARK //Opt Condition: MR/Spark\n+ if( n.getExecType() == getRemoteExecType() //Opt Condition: MR/Spark\n|| h.getMemEstimate() > thetaM ) //Opt Condition: mem estimate > constraint to force partitioning\n{\n//NOTE: subsequent rewrites will still use the MR mem estimate\n@@ -608,23 +612,22 @@ public class OptimizerRuleBased extends Optimizer\nParForProgramBlock pfpb = (ParForProgramBlock) o[1];\n//search for candidates\n- Collection<OptNode> cand = n.getNodeList(ExecType.MR);\n+ Collection<OptNode> cand = n.getNodeList(getRemoteExecType());\n//determine if applicable\nboolean apply = M < _rm //ops fit in remote memory budget\n&& !cand.isEmpty() //at least one MR\n- && isResultPartitionableAll(cand,pfpb.getResultVariables(),vars, pfpb.getIterablePredicateVars()[0]); // check candidates\n+ && isResultPartitionableAll(cand,pfpb.getResultVariables(),\n+ vars, pfpb.getIterablePredicateVars()[0]); // check candidates\n//recompile LIX\nif( apply )\n{\n- try\n- {\n+ try {\nfor(OptNode lix : cand)\nrecompileLIX( lix, vars );\n}\n- catch(Exception ex)\n- {\n+ catch(Exception ex) {\nthrow new DMLRuntimeException(\"Unable to recompile LIX.\", ex);\n}\n}\n@@ -827,8 +830,9 @@ public class OptimizerRuleBased extends Optimizer\nboolean isCPOnlyPossible = isCPOnly || isCPOnlyPossible(n, _rm);\nString datapartitioner = n.getParam(ParamType.DATA_PARTITIONER);\n- ExecType REMOTE = OptimizerUtils.isSparkExecutionMode() ? ExecType.SPARK : ExecType.MR;\n- PDataPartitioner REMOTE_DP = OptimizerUtils.isSparkExecutionMode() ? 
PDataPartitioner.REMOTE_SPARK : PDataPartitioner.REMOTE_MR;\n+ ExecType REMOTE = getRemoteExecType();\n+ PDataPartitioner REMOTE_DP = OptimizerUtils.isSparkExecutionMode() ?\n+ PDataPartitioner.REMOTE_SPARK : PDataPartitioner.REMOTE_MR;\n//deciding on the execution strategy\nif( ConfigurationManager.isParallelParFor() //allowed remote parfor execution\n@@ -906,7 +910,7 @@ public class OptimizerRuleBased extends Optimizer\nExecType et = n.getExecType();\nboolean ret = ( et == ExecType.CP);\n- if( n.isLeaf() && (et == ExecType.MR || et == ExecType.SPARK) )\n+ if( n.isLeaf() && et == getRemoteExecType() )\n{\nHop h = OptTreeConverter.getAbstractPlanMapping().getMappedHop( n.getID() );\nif( h.getForcedExecType()!=LopProperties.ExecType.MR //e.g., -exec=hadoop\n@@ -1156,7 +1160,7 @@ public class OptimizerRuleBased extends Optimizer\n.getAbstractPlanMapping().getMappedProg(n.getID())[1];\n//decide on the replication factor\n- if( n.getExecType()==ExecType.MR || n.getExecType()==ExecType.SPARK )\n+ if( n.getExecType()==getRemoteExecType() )\n{\napply = true;\n@@ -1417,7 +1421,8 @@ public class OptimizerRuleBased extends Optimizer\n{\nsetTaskPartitioner( pn, PTaskPartitioner.FACTORING_CMAX );\n}\n- else if( pn.getExecType()==ExecType.MR && !jvmreuse && pn.hasOnlySimpleChilds() )\n+ else if( ((pn.getExecType()==ExecType.MR && !jvmreuse)\n+ || pn.getExecType()==ExecType.SPARK) && pn.hasOnlySimpleChilds() )\n{\n//for simple body programs without loops, branches, or function calls, we don't\n//expect much load imbalance and hence use static partitioning in order to\n@@ -2931,7 +2936,7 @@ public class OptimizerRuleBased extends Optimizer\nPResultMerge ret = null;\n//investigate details of current parfor node\n- boolean flagRemoteParFOR = (n.getExecType() == ExecType.MR || n.getExecType() == ExecType.SPARK);\n+ boolean flagRemoteParFOR = (n.getExecType() == getRemoteExecType());\nboolean flagLargeResult = hasLargeTotalResults( n, pfpb.getResultVariables(), vars, true );\nboolean flagRemoteLeftIndexing = hasResultMRLeftIndexing( n, pfpb.getResultVariables(), vars, true );\nboolean flagCellFormatWoCompare = determineFlagCellFormatWoCompare(pfpb.getResultVariables(), vars);\n@@ -3015,8 +3020,8 @@ public class OptimizerRuleBased extends Optimizer\n{\nString opName = n.getParam(ParamType.OPSTRING);\n//check opstring and exec type\n- if( opName !=null && opName.equals(LeftIndexingOp.OPSTRING) &&\n- (n.getExecType() == ExecType.MR || n.getExecType() == ExecType.SPARK) )\n+ if( opName != null && opName.equals(LeftIndexingOp.OPSTRING)\n+ && n.getExecType() == getRemoteExecType() )\n{\nLeftIndexingOp hop = (LeftIndexingOp) OptTreeConverter.getAbstractPlanMapping().getMappedHop(n.getID());\n//check agains set of varname\n@@ -3151,7 +3156,7 @@ public class OptimizerRuleBased extends Optimizer\nif( n.getNodeType() == NodeType.PARFOR )\n{\nrewriteSetResultMerge(n, vars, inLocal);\n- if( n.getExecType()==ExecType.MR || n.getExecType()==ExecType.SPARK )\n+ if( n.getExecType()==getRemoteExecType() )\ninLocal = false;\n}\nelse if( n.getChilds()!=null )\n@@ -3493,7 +3498,6 @@ public class OptimizerRuleBased extends Optimizer\nreturn count;\n}\n-\n////////////////////////\n// Helper methods //\n////////////////////////\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1546] Fix parfor optimizer (result/task partitioning on spark) |
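The recurring fix above is that rewrite conditions previously fired when a node's exec type was MR or Spark regardless of the active backend; they now compare against the single remote type of the current execution mode. A minimal sketch of the consolidated check:

enum ExecType { CP, MR, SPARK }

public class RemoteExecTypeSketch {
    static boolean sparkMode = true; // stands in for OptimizerUtils.isSparkExecutionMode()
    static ExecType getRemoteExecType() {
        return sparkMode ? ExecType.SPARK : ExecType.MR;
    }
    // replaces the old (et == ExecType.MR || et == ExecType.SPARK) disjunction,
    // which also matched nodes of the inactive backend
    static boolean isRemote(ExecType et) {
        return et == getRemoteExecType();
    }
}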
49,768 | 19.04.2017 14:22:41 | 25,200 | 8bdcf106ca9bd04c0f68924ad5827eb7d7d54952 | [maven-release-plugin] prepare release v0.14.0-incubating-rc4 | [
{
"change_type": "MODIFY",
"old_path": "pom.xml",
"new_path": "pom.xml",
"diff": "<version>18</version>\n</parent>\n<groupId>org.apache.systemml</groupId>\n- <version>0.14.0-incubating-SNAPSHOT</version>\n+ <version>0.14.0-incubating</version>\n<artifactId>systemml</artifactId>\n<packaging>jar</packaging>\n<name>SystemML</name>\n<connection>scm:git:git@github.com:apache/incubator-systemml</connection>\n<developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-systemml</developerConnection>\n<url>https://git-wip-us.apache.org/repos/asf?p=incubator-systemml.git</url>\n- <tag>HEAD</tag>\n+ <tag>v0.14.0-incubating-rc4</tag>\n</scm>\n<issueManagement>\n<system>JIRA</system>\n"
}
] | Java | Apache License 2.0 | apache/systemds | [maven-release-plugin] prepare release v0.14.0-incubating-rc4 |
49,736 | 19.04.2017 14:20:00 | 28,800 | d69fdfe45cbd20efc618927a07b457bbb5f43023 | [MINOR] Updated the algorithm documentation to clearly mark the "Argument" section. | [
{
"change_type": "MODIFY",
"old_path": "docs/algorithms-classification.md",
"new_path": "docs/algorithms-classification.md",
"diff": "@@ -180,7 +180,7 @@ val prediction = model.transform(X_test_df)\n</div>\n</div>\n-### Arguments\n+### Arguments for Spark and Hadoop invocation\n**X**: Location (on HDFS) to read the input matrix of feature vectors; each row\nconstitutes one feature vector.\n@@ -592,7 +592,7 @@ val prediction = model.transform(X_test_df)\n</div>\n</div>\n-#### Arguments\n+#### Arguments for Spark and Hadoop invocation\n**X**: Location (on HDFS) to read the matrix of feature vectors; each\nrow constitutes one feature vector.\n@@ -861,7 +861,7 @@ val prediction = model.transform(X_test_df)\n</div>\n-#### Arguments\n+#### Arguments for Spark and Hadoop invocation\n**X**: Location (on HDFS) containing the explanatory variables in\na matrix. Each row constitutes an example.\n@@ -1212,7 +1212,7 @@ val prediction = model.transform(X_test_df)\n</div>\n-### Arguments\n+### Arguments for Spark and Hadoop invocation\n**X**: Location (on HDFS) to read the matrix of feature vectors; each\nrow constitutes one feature vector.\n@@ -1472,7 +1472,7 @@ implementation is well-suited to handle large-scale data and builds a\n</div>\n-### Arguments\n+### Arguments for Spark and Hadoop invocation\n**X**: Location (on HDFS) to read the matrix of feature vectors; each row\nconstitutes one feature vector. Note that categorical features in $X$\n@@ -1887,7 +1887,7 @@ for classification in parallel.\n</div>\n-### Arguments\n+### Arguments for Spark and Hadoop invocation\n**X**: Location (on HDFS) to read the matrix of feature vectors; each row\nconstitutes one feature vector. Note that categorical features in $X$\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/algorithms-regression.md",
"new_path": "docs/algorithms-regression.md",
"diff": "@@ -168,7 +168,7 @@ y_test = lr.fit(df_train)\n</div>\n-### Arguments\n+### Arguments for Spark and Hadoop invocation\n**X**: Location (on HDFS) to read the matrix of feature vectors, each row\nconstitutes one feature vector\n@@ -570,7 +570,7 @@ lowest AIC is computed.\n</div>\n</div>\n-### Arguments\n+### Arguments for Spark and Hadoop invocation\n**X**: Location (on HDFS) to read the matrix of feature vectors, each row\ncontains one feature vector.\n@@ -782,7 +782,7 @@ distributions and link functions, see below for details.\n</div>\n</div>\n-### Arguments\n+### Arguments for Spark and Hadoop invocation\n**X**: Location (on HDFS) to read the matrix of feature vectors; each row\nconstitutes an example.\n@@ -1255,7 +1255,7 @@ distribution family is supported (see below for details).\n</div>\n-### Arguments\n+### Arguments for Spark and Hadoop invocation\n**X**: Location (on HDFS) to read the matrix of feature vectors; each row is an\nexample.\n@@ -1503,7 +1503,7 @@ this step outside the scope of `GLM-predict.dml` for now.\n</div>\n-### Arguments\n+### Arguments for Spark and Hadoop invocation\n**X**: Location (on HDFS) to read the $n\\,{\\times}\\,m$-matrix $X$ of feature\nvectors, each row constitutes one feature vector (one record)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Updated the algorithm documentation to clearly mark the "Argument" section. |
49,738 | 18.04.2017 01:37:55 | 25,200 | 53fe1ae68ab3b5024ead0d258a213f3e4f392616 | [MINOR] Graceful value type casting of scalar function args, cleanup | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/debug/DMLDebuggerFunctions.java",
"new_path": "src/main/java/org/apache/sysml/debug/DMLDebuggerFunctions.java",
"diff": "@@ -34,14 +34,9 @@ import org.apache.sysml.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.instructions.Instruction;\nimport org.apache.sysml.runtime.instructions.MRJobInstruction;\n-import org.apache.sysml.runtime.instructions.cp.BooleanObject;\nimport org.apache.sysml.runtime.instructions.cp.BreakPointInstruction;\nimport org.apache.sysml.runtime.instructions.cp.CPInstruction;\n-import org.apache.sysml.runtime.instructions.cp.Data;\n-import org.apache.sysml.runtime.instructions.cp.DoubleObject;\n-import org.apache.sysml.runtime.instructions.cp.IntObject;\n-import org.apache.sysml.runtime.instructions.cp.ScalarObject;\n-import org.apache.sysml.runtime.instructions.cp.StringObject;\n+import org.apache.sysml.runtime.instructions.cp.ScalarObjectFactory;\nimport org.apache.sysml.runtime.instructions.cp.BreakPointInstruction.BPINSTRUCTION_STATUS;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\n@@ -269,28 +264,8 @@ public class DMLDebuggerFunctions {\nif (variables != null && !variables.keySet().isEmpty()) {\nif (variables.get(varname) != null) {\nif (variables.get(varname).getDataType() == DataType.SCALAR) {\n- Data value;\n- switch(variables.get(varname).getValueType()) {\n- case DOUBLE:\n- double d = Double.parseDouble(args[1]);\n- value = (ScalarObject) new DoubleObject(d);\n- break;\n- case INT:\n- long i = Long.parseLong(args[1]);\n- value = (ScalarObject) new IntObject(i);\n- break;\n- case BOOLEAN:\n- boolean b = Boolean.parseBoolean(args[1]);\n- value = (ScalarObject) new BooleanObject(b);\n- break;\n- case STRING:\n- value = (ScalarObject) new StringObject(args[1]);\n- break;\n- default:\n- System.err.println(\"Invalid scalar value type.\");\n- return;\n- }\n- variables.put(varname, value);\n+ variables.put(varname, ScalarObjectFactory\n+ .createScalarObject(variables.get(varname).getValueType(), args[1]));\nSystem.out.println(varname + \" = \" + variables.get(varname).toString());\n}\nelse\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ipa/InterProceduralAnalysis.java",
"new_path": "src/main/java/org/apache/sysml/hops/ipa/InterProceduralAnalysis.java",
"diff": "@@ -70,12 +70,9 @@ import org.apache.sysml.parser.WhileStatement;\nimport org.apache.sysml.parser.WhileStatementBlock;\nimport org.apache.sysml.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n-import org.apache.sysml.runtime.instructions.cp.BooleanObject;\nimport org.apache.sysml.runtime.instructions.cp.Data;\n-import org.apache.sysml.runtime.instructions.cp.DoubleObject;\n-import org.apache.sysml.runtime.instructions.cp.IntObject;\nimport org.apache.sysml.runtime.instructions.cp.ScalarObject;\n-import org.apache.sysml.runtime.instructions.cp.StringObject;\n+import org.apache.sysml.runtime.instructions.cp.ScalarObjectFactory;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.MatrixFormatMetaData;\nimport org.apache.sysml.udf.lib.DeNaNWrapper;\n@@ -695,16 +692,8 @@ public class InterProceduralAnalysis\n//always propagate scalar literals into functions\n//(for multiple calls, literal equivalence already checked)\nif( input instanceof LiteralOp ) {\n- LiteralOp lit = (LiteralOp)input;\n- ScalarObject scalar = null;\n- switch(input.getValueType()) {\n- case DOUBLE: scalar = new DoubleObject(lit.getDoubleValue()); break;\n- case INT: scalar = new IntObject(lit.getLongValue()); break;\n- case BOOLEAN: scalar = new BooleanObject(lit.getBooleanValue()); break;\n- case STRING: scalar = new StringObject(lit.getStringValue()); break;\n- default: //do nothing\n- }\n- vars.put(dat.getName(), scalar);\n+ vars.put(dat.getName(), ScalarObjectFactory\n+ .createScalarObject(input.getValueType(), (LiteralOp)input));\n}\n//propagate scalar variables into functions if called once\n//and input scalar is existing variable in symbol table\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java",
"new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java",
"diff": "@@ -56,12 +56,8 @@ import org.apache.sysml.parser.DataIdentifier;\nimport org.apache.sysml.parser.Statement;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\n-import org.apache.sysml.runtime.DMLRuntimeException;\n-import org.apache.sysml.runtime.instructions.cp.BooleanObject;\n-import org.apache.sysml.runtime.instructions.cp.DoubleObject;\n-import org.apache.sysml.runtime.instructions.cp.IntObject;\nimport org.apache.sysml.runtime.instructions.cp.ScalarObject;\n-import org.apache.sysml.runtime.instructions.cp.StringObject;\n+import org.apache.sysml.runtime.instructions.cp.ScalarObjectFactory;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n@@ -196,26 +192,13 @@ public class HopRewriteUtils\npublic static ScalarObject getScalarObject( LiteralOp op )\n{\n- ScalarObject ret = null;\n-\n- try\n- {\n- switch( op.getValueType() )\n- {\n- case DOUBLE: ret = new DoubleObject(op.getDoubleValue()); break;\n- case INT: ret = new IntObject(op.getLongValue()); break;\n- case BOOLEAN: ret = new BooleanObject(op.getBooleanValue()); break;\n- case STRING: ret = new StringObject(op.getStringValue()); break;\n- default:\n- throw new DMLRuntimeException(\"Invalid scalar object value type: \"+op.getValueType());\n+ try {\n+ return ScalarObjectFactory\n+ .createScalarObject(op.getValueType(), op);\n}\n- }\n- catch(Exception ex)\n- {\n+ catch(Exception ex) {\nthrow new RuntimeException(\"Failed to create scalar object for constant. Continue.\", ex);\n}\n-\n- return ret;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/ExecutionContext.java",
"new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/context/ExecutionContext.java",
"diff": "@@ -36,13 +36,10 @@ import org.apache.sysml.runtime.controlprogram.caching.FrameObject;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\nimport org.apache.sysml.runtime.controlprogram.caching.MatrixObject.UpdateType;\nimport org.apache.sysml.runtime.instructions.Instruction;\n-import org.apache.sysml.runtime.instructions.cp.BooleanObject;\nimport org.apache.sysml.runtime.instructions.cp.Data;\n-import org.apache.sysml.runtime.instructions.cp.DoubleObject;\nimport org.apache.sysml.runtime.instructions.cp.FunctionCallCPInstruction;\n-import org.apache.sysml.runtime.instructions.cp.IntObject;\nimport org.apache.sysml.runtime.instructions.cp.ScalarObject;\n-import org.apache.sysml.runtime.instructions.cp.StringObject;\n+import org.apache.sysml.runtime.instructions.cp.ScalarObjectFactory;\nimport org.apache.sysml.runtime.instructions.gpu.context.GPUContext;\nimport org.apache.sysml.runtime.instructions.gpu.context.GPUObject;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n@@ -53,7 +50,6 @@ import org.apache.sysml.runtime.matrix.data.FrameBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.Pair;\nimport org.apache.sysml.runtime.util.MapReduceTool;\n-import org.apache.sysml.runtime.util.UtilFunctions;\npublic class ExecutionContext\n@@ -347,31 +343,12 @@ public class ExecutionContext\nthrows DMLRuntimeException\n{\nif ( isLiteral ) {\n- switch (vt) {\n- case INT:\n- long intVal = UtilFunctions.parseToLong(name);\n- IntObject intObj = new IntObject(intVal);\n- return intObj;\n- case DOUBLE:\n- double doubleVal = Double.parseDouble(name);\n- DoubleObject doubleObj = new DoubleObject(doubleVal);\n- return doubleObj;\n- case BOOLEAN:\n- Boolean boolVal = Boolean.parseBoolean(name);\n- BooleanObject boolObj = new BooleanObject(boolVal);\n- return boolObj;\n- case STRING:\n- StringObject stringObj = new StringObject(name);\n- return stringObj;\n- default:\n- throw new DMLRuntimeException(\"Unknown value type: \" + vt + \" for variable: \" + name);\n- }\n+ return ScalarObjectFactory.createScalarObject(vt, name);\n}\nelse {\nData obj = getVariable(name);\n- if (obj == null) {\n+ if (obj == null)\nthrow new DMLRuntimeException(\"Unknown variable: \" + name);\n- }\nreturn (ScalarObject) obj;\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/FunctionCallCPInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/FunctionCallCPInstruction.java",
"diff": "@@ -151,6 +151,15 @@ public class FunctionCallCPInstruction extends CPInstruction\n//get input matrix/frame/scalar\ncurrFormalParamValue = (operand.getDataType()!=DataType.SCALAR) ? ec.getVariable(varname) :\nec.getScalarInput(varname, operand.getValueType(), operand.isLiteral());\n+\n+ //graceful value type conversion for scalar inputs with wrong type\n+ if( currFormalParamValue.getDataType() == DataType.SCALAR\n+ && currFormalParamValue.getValueType() != operand.getValueType() )\n+ {\n+ ScalarObject so = (ScalarObject) currFormalParamValue;\n+ currFormalParamValue = ScalarObjectFactory\n+ .createScalarObject(operand.getValueType(), so);\n+ }\n}\nfunctionVariables.put(currFormalParamName, currFormalParamValue);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ScalarObjectFactory.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ScalarObjectFactory.java",
"diff": "package org.apache.sysml.runtime.instructions.cp;\n+import org.apache.sysml.hops.HopsException;\n+import org.apache.sysml.hops.LiteralOp;\nimport org.apache.sysml.parser.Expression.ValueType;\n+import org.apache.sysml.runtime.util.UtilFunctions;\npublic abstract class ScalarObjectFactory\n{\n+ public static ScalarObject createScalarObject(ValueType vt, String value) {\n+ switch( vt ) {\n+ case INT: return new IntObject(UtilFunctions.parseToLong(value));\n+ case DOUBLE: return new DoubleObject(Double.parseDouble(value));\n+ case BOOLEAN: return new BooleanObject(Boolean.parseBoolean(value));\n+ case STRING: return new StringObject(value);\n+ default: throw new RuntimeException(\"Unsupported scalar value type: \"+vt.name());\n+ }\n+ }\npublic static ScalarObject createScalarObject(ValueType vt, Object obj) {\nswitch( vt ) {\n@@ -30,7 +42,27 @@ public abstract class ScalarObjectFactory\ncase INT: return new IntObject((Long)obj);\ncase DOUBLE: return new DoubleObject((Double)obj);\ncase STRING: return new StringObject((String)obj);\n- default: throw new RuntimeException(\"Unsupported scalar object type: \"+vt.toString());\n+ default: throw new RuntimeException(\"Unsupported scalar value type: \"+vt.name());\n+ }\n+ }\n+\n+ public static ScalarObject createScalarObject(ValueType vt, ScalarObject so) {\n+ switch( vt ) {\n+ case DOUBLE: return new DoubleObject(so.getDoubleValue());\n+ case INT: return new IntObject(so.getLongValue());\n+ case BOOLEAN: return new BooleanObject(so.getBooleanValue());\n+ case STRING: return new StringObject(so.getStringValue());\n+ default: throw new RuntimeException(\"Unsupported scalar value type: \"+vt.name());\n+ }\n+ }\n+\n+ public static ScalarObject createScalarObject(ValueType vt, LiteralOp lit) throws HopsException {\n+ switch( vt ) {\n+ case DOUBLE: return new DoubleObject(lit.getDoubleValue());\n+ case INT: return new IntObject(lit.getLongValue());\n+ case BOOLEAN: return new BooleanObject(lit.getBooleanValue());\n+ case STRING: return new StringObject(lit.getStringValue());\n+ default: throw new RuntimeException(\"Unsupported scalar value type: \"+vt.name());\n}\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Graceful value type casting of scalar function args, cleanup |
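With this change, a scalar bound to a function's formal parameter is rebuilt under the parameter's declared value type when the types disagree (e.g., an integer argument arriving at a double parameter), rather than propagating the mismatched type. A rough sketch of the conversion idea, using plain Java values in place of the ScalarObject hierarchy:

public class ScalarCastSketch {
    public static Object castTo(String targetValueType, Object v) {
        switch (targetValueType) {
            case "DOUBLE":  return ((Number) v).doubleValue();
            case "INT":     return ((Number) v).longValue();
            case "BOOLEAN": return Boolean.parseBoolean(v.toString());
            case "STRING":  return v.toString();
            default: throw new IllegalArgumentException(targetValueType);
        }
    }
    public static void main(String[] args) {
        // integer argument arriving at a DOUBLE formal parameter
        System.out.println(castTo("DOUBLE", 7L)); // 7.0
    }
}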
49,738 | 18.04.2017 14:23:50 | 25,200 | 7b7b9ba9ba48faab05f396ddbac0c81eb2cc50f3 | Fix codegen cost model (missing common operations)
This patch adds ctable, centralmoment, and covariance, in both binary
and ternary forms, to the codegen cost model, which is used in the
cost-based plan selector. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseCostBased.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseCostBased.java",
"diff": "@@ -41,6 +41,7 @@ import org.apache.sysml.hops.Hop;\nimport org.apache.sysml.hops.Hop.AggOp;\nimport org.apache.sysml.hops.Hop.Direction;\nimport org.apache.sysml.hops.IndexingOp;\n+import org.apache.sysml.hops.LiteralOp;\nimport org.apache.sysml.hops.ParameterizedBuiltinOp;\nimport org.apache.sysml.hops.ReorgOp;\nimport org.apache.sysml.hops.TernaryOp;\n@@ -788,6 +789,19 @@ public class PlanSelectionFuseCostBased extends PlanSelection\ncurrent.getInput().get(1), 2) ? 1 : 16); break;\ncase MINUS_NZ:\ncase MINUS1_MULT: costs = 2; break;\n+ case CENTRALMOMENT:\n+ int type = (int) (current.getInput().get(1) instanceof LiteralOp ?\n+ HopRewriteUtils.getIntValueSafe((LiteralOp)current.getInput().get(1)) : 2);\n+ switch( type ) {\n+ case 0: costs = 1; break; //count\n+ case 1: costs = 8; break; //mean\n+ case 2: costs = 16; break; //cm2\n+ case 3: costs = 31; break; //cm3\n+ case 4: costs = 51; break; //cm4\n+ case 5: costs = 16; break; //variance\n+ }\n+ break;\n+ case COVARIANCE: costs = 23; break;\ndefault:\nthrow new RuntimeException(\"Cost model not \"\n+ \"implemented yet for: \"+((BinaryOp)current).getOp());\n@@ -797,6 +811,20 @@ public class PlanSelectionFuseCostBased extends PlanSelection\nswitch( ((TernaryOp)current).getOp() ) {\ncase PLUS_MULT:\ncase MINUS_MULT: costs = 2; break;\n+ case CTABLE: costs = 3; break;\n+ case CENTRALMOMENT:\n+ int type = (int) (current.getInput().get(1) instanceof LiteralOp ?\n+ HopRewriteUtils.getIntValueSafe((LiteralOp)current.getInput().get(1)) : 2);\n+ switch( type ) {\n+ case 0: costs = 2; break; //count\n+ case 1: costs = 9; break; //mean\n+ case 2: costs = 17; break; //cm2\n+ case 3: costs = 32; break; //cm3\n+ case 4: costs = 52; break; //cm4\n+ case 5: costs = 17; break; //variance\n+ }\n+ break;\n+ case COVARIANCE: costs = 23; break;\ndefault:\nthrow new RuntimeException(\"Cost model not \"\n+ \"implemented yet for: \"+((TernaryOp)current).getOp());\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1537] Fix codegen cost model (missing common operations)
This patch adds ctable, centralmoment, and covariance, in both binary
and ternary forms, to the codegen cost model, which is used in the
cost-based plan selector. |
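The new weights charge ctable 3 units, covariance 23, and central moments by the order of the moment, with the ternary (weighted) central-moment variants costing one unit more than the binary ones. Restated compactly (illustrative helper, not the PlanSelection API):

public class CmCostSketch {
    public static double centralMomentCost(int type, boolean ternary) {
        double base;
        switch (type) {
            case 0: base = 1;  break; // count
            case 1: base = 8;  break; // mean
            case 2: base = 16; break; // cm2
            case 3: base = 31; break; // cm3
            case 4: base = 51; break; // cm4
            case 5: base = 16; break; // variance
            default: base = 16; break; // non-literal order defaults to cm2
        }
        return ternary ? base + 1 : base;
    }
}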
49,738 | 18.04.2017 14:27:39 | 25,200 | b641edc165aab129559a5db1bf5e9d050cd30b37 | Improved dynamic recompilation (stats update post rewrites) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/recompile/Recompiler.java",
"new_path": "src/main/java/org/apache/sysml/hops/recompile/Recompiler.java",
"diff": "@@ -198,9 +198,15 @@ public class Recompiler\nrUpdateStatistics( hopRoot, vars );\n// dynamic hop rewrites\n- if( !inplace )\n+ if( !inplace ) {\n_rewriter.get().rewriteHopDAGs( hops, null );\n+ //update stats after rewrites\n+ Hop.resetVisitStatus(hops);\n+ for( Hop hopRoot : hops )\n+ rUpdateStatistics( hopRoot, vars );\n+ }\n+\n// refresh memory estimates (based on updated stats,\n// before: init memo table with propagated worst-case estimates,\n// after: extract worst-case estimates from memo table\n@@ -303,9 +309,14 @@ public class Recompiler\nrUpdateStatistics( hops, vars );\n// dynamic hop rewrites\n- if( !inplace )\n+ if( !inplace ) {\n_rewriter.get().rewriteHopDAG( hops, null );\n+ //update stats after rewrites\n+ hops.resetVisitStatus();\n+ rUpdateStatistics( hops, vars );\n+ }\n+\n// refresh memory estimates (based on updated stats)\nMemoTable memo = new MemoTable();\nhops.resetVisitStatus();\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1321] Improved dynamic recompilation (stats update post rewrites) |
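The fix is purely one of ordering: dynamic rewrites can change operators and shapes, so statistics must be re-propagated before memory estimates are refreshed. A sketch of the corrected sequence, with placeholder types for the Hop DAG:

public class RecompileOrderSketch {
    interface HopDag {
        void updateStatistics();      // propagate sizes from the symbol table
        void applyDynamicRewrites();  // may change ops and shapes
        void refreshMemEstimates();   // worst-case memory estimates
    }
    public static void recompile(HopDag hops, boolean inplace) {
        hops.updateStatistics();
        if (!inplace) {
            hops.applyDynamicRewrites();
            hops.updateStatistics();  // NEW: re-propagate after rewrites
        }
        hops.refreshMemEstimates();   // estimates now match the final DAG
    }
}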
49,736 | 20.04.2017 16:13:55 | 25,200 | 39bf1e6d5b08fa5eadab17a17867ca407c84d0a5 | [MINOR] Supported Caffe2DML via pip
Enables using Caffe2DML directly via pip without requiring
--driver-class-path SystemML.jar | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemml/mllearn/estimators.py",
"new_path": "src/main/python/systemml/mllearn/estimators.py",
"diff": "@@ -570,6 +570,7 @@ class Caffe2DML(BaseSystemMLClassifier):\n\"\"\"\nself.sqlCtx = sqlCtx\nself.sc = sqlCtx._sc\n+ createJavaObject(self.sc, 'dummy')\nself.uid = \"Caffe2DML\"\nself.model = None\nif len(input_shape) != 3:\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-692] [MINOR] Supported Caffe2DML via pip
- Enables using Caffe2DML directly via pip without requiring
--driver-class-path SystemML.jar |
49,738 | 21.04.2017 00:08:09 | 25,200 | bda96a8e8690f8821c476d5370c0d39e41da19a2 | New multi-threaded column-wise rexpand operations
This patch introduces a multi-threaded runtime for the internal
parameterized built-in function rexpand, specifically column expansion,
along with necessary compiler modifications. The runtime improvements
are moderate for both dense and sparse, ranging from 1.6x to 2x due to
better write bandwidth exploitation (dense) and latency hiding (sparse). | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ParameterizedBuiltinOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/ParameterizedBuiltinOp.java",
"diff": "@@ -784,8 +784,9 @@ public class ParameterizedBuiltinOp extends Hop implements MultiThreadedHop\n{\nif( et == ExecType.CP || et == ExecType.SPARK )\n{\n+ int k = OptimizerUtils.getConstrainedNumThreads( _maxNumThreads );\nParameterizedBuiltin pbilop = new ParameterizedBuiltin(inputlops,\n- HopsParameterizedBuiltinLops.get(_op), getDataType(), getValueType(), et);\n+ HopsParameterizedBuiltinLops.get(_op), getDataType(), getValueType(), et, k);\nsetOutputDimensions(pbilop);\nsetLineNumbers(pbilop);\nsetLops(pbilop);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java",
"new_path": "src/main/java/org/apache/sysml/hops/rewrite/HopRewriteUtils.java",
"diff": "@@ -1048,23 +1048,27 @@ public class HopRewriteUtils\n//////////////////////////////////////\n// utils for lookup tables\n- public static boolean isValidOp( AggOp input, AggOp[] validTab ) {\n+ public static boolean isValidOp( AggOp input, AggOp... validTab ) {\nreturn ArrayUtils.contains(validTab, input);\n}\n- public static boolean isValidOp( OpOp1 input, OpOp1[] validTab ) {\n+ public static boolean isValidOp( OpOp1 input, OpOp1... validTab ) {\nreturn ArrayUtils.contains(validTab, input);\n}\n- public static boolean isValidOp( OpOp2 input, OpOp2[] validTab ) {\n+ public static boolean isValidOp( OpOp2 input, OpOp2... validTab ) {\nreturn ArrayUtils.contains(validTab, input);\n}\n- public static boolean isValidOp( ReOrgOp input, ReOrgOp[] validTab ) {\n+ public static boolean isValidOp( ReOrgOp input, ReOrgOp... validTab ) {\nreturn ArrayUtils.contains(validTab, input);\n}\n- public static int getValidOpPos( OpOp2 input, OpOp2[] validTab ) {\n+ public static boolean isValidOp( ParamBuiltinOp input, ParamBuiltinOp... validTab ) {\n+ return ArrayUtils.contains(validTab, input);\n+ }\n+\n+ public static int getValidOpPos( OpOp2 input, OpOp2... validTab ) {\nreturn ArrayUtils.indexOf(validTab, input);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/lops/ParameterizedBuiltin.java",
"new_path": "src/main/java/org/apache/sysml/lops/ParameterizedBuiltin.java",
"diff": "@@ -48,8 +48,17 @@ public class ParameterizedBuiltin extends Lop\nprivate HashMap<String, Lop> _inputParams;\nprivate boolean _bRmEmptyBC;\n+ //cp-specific parameters\n+ private int _numThreads = 1;\n+\npublic ParameterizedBuiltin(HashMap<String, Lop> paramLops, OperationTypes op, DataType dt, ValueType vt, ExecType et)\nthrows HopsException\n+ {\n+ this(paramLops, op, dt, vt, et, 1);\n+ }\n+\n+ public ParameterizedBuiltin(HashMap<String, Lop> paramLops, OperationTypes op, DataType dt, ValueType vt, ExecType et, int k)\n+ throws HopsException\n{\nsuper(Lop.Type.ParameterizedBuiltin, dt, vt);\n_operation = op;\n@@ -60,6 +69,7 @@ public class ParameterizedBuiltin extends Lop\n}\n_inputParams = paramLops;\n+ _numThreads = k;\nboolean breaksAlignment = false;\nboolean aligner = false;\n@@ -230,7 +240,14 @@ public class ParameterizedBuiltin extends Lop\nsb.append(OPERAND_DELIMITOR);\n}\n- sb.append(this.prepOutputOperand(output));\n+ if( getExecType()==ExecType.CP && _operation == OperationTypes.REXPAND ) {\n+ sb.append( \"k\" );\n+ sb.append( Lop.NAME_VALUE_SEPARATOR );\n+ sb.append( _numThreads );\n+ sb.append(OPERAND_DELIMITOR);\n+ }\n+\n+ sb.append(prepOutputOperand(output));\nreturn sb.toString();\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/compress/CompressedMatrixBlock.java",
"new_path": "src/main/java/org/apache/sysml/runtime/compress/CompressedMatrixBlock.java",
"diff": "@@ -2007,11 +2007,11 @@ public class CompressedMatrixBlock extends MatrixBlock implements Externalizable\n@Override\npublic MatrixBlock rexpandOperations(MatrixBlock ret, double max,\n- boolean rows, boolean cast, boolean ignore)\n+ boolean rows, boolean cast, boolean ignore, int k)\nthrows DMLRuntimeException {\nprintDecompressWarning(\"rexpandOperations\");\nMatrixBlock tmp = isCompressed() ? decompress() : this;\n- return tmp.rexpandOperations(ret, max, rows, cast, ignore);\n+ return tmp.rexpandOperations(ret, max, rows, cast, ignore, k);\n}\n@Override\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java",
"new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/parfor/opt/OptimizerRuleBased.java",
"diff": "@@ -1359,7 +1359,8 @@ public class OptimizerRuleBased extends Optimizer\nif( ConfigurationManager.isParallelMatrixOperations()\n&& h instanceof MultiThreadedHop //abop, datagenop, qop, paramop\n&& !( h instanceof ParameterizedBuiltinOp //only paramop-grpagg\n- && ((ParameterizedBuiltinOp)h).getOp()!=ParamBuiltinOp.GROUPEDAGG)\n+ && !HopRewriteUtils.isValidOp(((ParameterizedBuiltinOp)h).getOp(),\n+ ParamBuiltinOp.GROUPEDAGG, ParamBuiltinOp.REXPAND))\n&& !( h instanceof UnaryOp //only unaryop-cumulativeagg\n&& !((UnaryOp)h).isCumulativeUnaryOperation() )\n&& !( h instanceof ReorgOp //only reorgop-transpose\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParameterizedBuiltinCPInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ParameterizedBuiltinCPInstruction.java",
"diff": "@@ -57,7 +57,6 @@ public class ParameterizedBuiltinCPInstruction extends ComputationCPInstruction\nprivate static final String TOSTRING_SEPARATOR = \" \";\nprivate static final String TOSTRING_LINESEPARATOR = \"\\n\";\n-\nprivate int arity;\nprotected HashMap<String,String> params;\n@@ -248,7 +247,9 @@ public class ParameterizedBuiltinCPInstruction extends ComputationCPInstruction\nboolean dirVal = params.get(\"dir\").equals(\"rows\");\nboolean cast = Boolean.parseBoolean(params.get(\"cast\"));\nboolean ignore = Boolean.parseBoolean(params.get(\"ignore\"));\n- MatrixBlock ret = (MatrixBlock) target.rexpandOperations(new MatrixBlock(), maxVal, dirVal, cast, ignore);\n+ int numThreads = Integer.parseInt(params.get(\"k\"));\n+ MatrixBlock ret = (MatrixBlock) target.rexpandOperations(\n+ new MatrixBlock(), maxVal, dirVal, cast, ignore, numThreads);\n//release locks\nec.setMatrixOutput(output.getName(), ret);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixReorg.java",
"diff": "@@ -645,7 +645,7 @@ public class LibMatrixReorg\n* @return output matrix\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\n- public static MatrixBlock rexpand(MatrixBlock in, MatrixBlock ret, double max, boolean rows, boolean cast, boolean ignore)\n+ public static MatrixBlock rexpand(MatrixBlock in, MatrixBlock ret, double max, boolean rows, boolean cast, boolean ignore, int k)\nthrows DMLRuntimeException\n{\n//prepare parameters\n@@ -669,7 +669,7 @@ public class LibMatrixReorg\nif( rows )\nreturn rexpandRows(in, ret, lmax, cast, ignore);\nelse //cols\n- return rexpandColumns(in, ret, lmax, cast, ignore);\n+ return rexpandColumns(in, ret, lmax, cast, ignore, k);\n}\n/**\n@@ -694,7 +694,7 @@ public class LibMatrixReorg\n//execute rexpand operations incl sanity checks\n//TODO more robust (memory efficient) implementation w/o tmp block\n- MatrixBlock tmp = rexpand(in, new MatrixBlock(), max, rows, cast, ignore);\n+ MatrixBlock tmp = rexpand(in, new MatrixBlock(), max, rows, cast, ignore, 1);\n//prepare outputs blocks (slice tmp block into output blocks )\nif( rows ) //expanded vertically\n@@ -1909,7 +1909,7 @@ public class LibMatrixReorg\nreturn ret;\n}\n- private static MatrixBlock rexpandColumns(MatrixBlock in, MatrixBlock ret, int max, boolean cast, boolean ignore)\n+ private static MatrixBlock rexpandColumns(MatrixBlock in, MatrixBlock ret, int max, boolean cast, boolean ignore, int k)\nthrows DMLRuntimeException\n{\n//set meta data\n@@ -1918,10 +1918,43 @@ public class LibMatrixReorg\nfinal long nnz = in.nonZeros;\nboolean sp = MatrixBlock.evalSparseFormatInMemory(rlen, clen, nnz);\nret.reset(rlen, clen, sp);\n+ ret.allocateDenseOrSparseBlock();\n+ //execute rexpand columns\n+ long rnnz = 0; //real nnz (due to cutoff max)\n+ if( k <= 1 || in.getNumRows() <= PAR_NUMCELL_THRESHOLD ) {\n+ rnnz = rexpandColumns(in, ret, max, cast, ignore, 0, rlen);\n+ }\n+ else {\n+ try {\n+ ExecutorService pool = Executors.newFixedThreadPool( k );\n+ ArrayList<RExpandColsTask> tasks = new ArrayList<RExpandColsTask>();\n+ int blklen = (int)(Math.ceil((double)rlen/k/8));\n+ for( int i=0; i<8*k & i*blklen<rlen; i++ )\n+ tasks.add(new RExpandColsTask(in, ret,\n+ max, cast, ignore, i*blklen, Math.min((i+1)*blklen, rlen)));\n+ List<Future<Long>> taskret = pool.invokeAll(tasks);\n+ pool.shutdown();\n+ for( Future<Long> task : taskret )\n+ rnnz += task.get();\n+ }\n+ catch(Exception ex) {\n+ throw new DMLRuntimeException(ex);\n+ }\n+ }\n+\n+ //post-processing\n+ ret.setNonZeros(rnnz);\n+\n+ return ret;\n+ }\n+\n+ private static long rexpandColumns(MatrixBlock in, MatrixBlock ret, int max, boolean cast, boolean ignore, int rl, int ru)\n+ throws DMLRuntimeException\n+ {\n//expand input horizontally (input vector likely dense\n//but generic implementation for general case)\n- for( int i=0; i<rlen; i++ )\n+ for( int i=rl; i<ru; i++ )\n{\n//get value and cast if necessary (table)\ndouble val = in.quickGetValue(i, 0);\n@@ -1933,11 +1966,19 @@ public class LibMatrixReorg\nthrow new DMLRuntimeException(\"Invalid input value <= 0 for ignore=false: \"+val);\n//set expanded value if matching\n- if( val == Math.floor(val) && val >= 1 && val <= max )\n- ret.appendValue(i, (int)(val-1), 1);\n+ if( val == Math.floor(val) && val >= 1 && val <= max ) {\n+ //update target without global nnz maintenance\n+ if( ret.isInSparseFormat() ) {\n+ ret.sparseBlock.allocate(i, 1);\n+ ret.sparseBlock.append(i, (int)(val-1), 1);\n+ }\n+ else\n+ ret.setValueDenseUnsafe(i, (int)(val-1), 1);\n+ }\n}\n- return ret;\n+ 
//recompute nnz of partition\n+ return ret.recomputeNonZeros(rl, ru-1, 0, ret.getNumColumns()-1);\n}\nprivate static void copyColVector( MatrixBlock in, int ixin, double[] tmp, int[] tmpi, int len)\n@@ -2145,4 +2186,30 @@ public class LibMatrixReorg\nreturn countNnzPerColumn(_in, _rl, _ru);\n}\n}\n+\n+ private static class RExpandColsTask implements Callable<Long>\n+ {\n+ private final MatrixBlock _in;\n+ private final MatrixBlock _out;\n+ private final int _max;\n+ private final boolean _cast;\n+ private final boolean _ignore;\n+ private final int _rl;\n+ private final int _ru;\n+\n+ protected RExpandColsTask(MatrixBlock in, MatrixBlock out, int max, boolean cast, boolean ignore, int rl, int ru) {\n+ _in = in;\n+ _out = out;\n+ _max = max;\n+ _cast = cast;\n+ _ignore = ignore;\n+ _rl = rl;\n+ _ru = ru;\n+ }\n+\n+ @Override\n+ public Long call() throws DMLRuntimeException {\n+ return rexpandColumns(_in, _out, _max, _cast, _ignore, _rl, _ru);\n+ }\n+ }\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java",
"diff": "@@ -5067,11 +5067,11 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nreturn removeEmptyOperations(ret, rows, null);\n}\n- public MatrixBlock rexpandOperations( MatrixBlock ret, double max, boolean rows, boolean cast, boolean ignore )\n+ public MatrixBlock rexpandOperations( MatrixBlock ret, double max, boolean rows, boolean cast, boolean ignore, int k )\nthrows DMLRuntimeException\n{\nMatrixBlock result = checkType(ret);\n- return LibMatrixReorg.rexpand(this, result, max, rows, cast, ignore);\n+ return LibMatrixReorg.rexpand(this, result, max, rows, cast, ignore, k);\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1551] New multi-threaded column-wise rexpand operations
This patch introduces a multi-threaded runtime for the internal
parameterized built-in function rexpand, specifically column expansion,
along with necessary compiler modifications. The runtime improvements
are moderate for both dense and sparse, ranging from 1.6x to 2x due to
better write bandwidth exploitation (dense) and latency hiding (sparse). |
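The parallelization in this commit follows a standard row-partitioning pattern: split the rows into roughly 8*k blocks for load balancing, run one task per disjoint row range, and aggregate the per-partition non-zero counts at the end. A self-contained illustration of that pattern over a plain 2D array (not SystemML's MatrixBlock):

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class RowPartitionDemo {
    public static void main(String[] args) throws Exception {
        double[][] data = new double[10000][16];
        data[7][3] = 1; data[42][0] = 2; // a few non-zeros
        int k = Runtime.getRuntime().availableProcessors();
        ExecutorService pool = Executors.newFixedThreadPool(k);
        int rlen = data.length;
        int blklen = (int) Math.ceil((double) rlen / k / 8); // ~8 blocks per thread
        List<Callable<Long>> tasks = new ArrayList<>();
        for (int i = 0; i < 8 * k && i * blklen < rlen; i++) {
            final int rl = i * blklen, ru = Math.min((i + 1) * blklen, rlen);
            tasks.add(() -> {
                long nnz = 0;
                for (int r = rl; r < ru; r++) // disjoint row ranges: no locking
                    for (double v : data[r])
                        nnz += (v != 0) ? 1 : 0;
                return nnz;
            });
        }
        long nnz = 0;
        for (Future<Long> f : pool.invokeAll(tasks))
            nnz += f.get(); // aggregate partition results, as with rnnz above
        pool.shutdown();
        System.out.println("nnz = " + nnz);
    }
}
```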
49,738 | 21.04.2017 03:21:19 | 25,200 | ea6e2fe39077d6d914bd8024b5950096602ade8e | [HOTFIX] Fix recompute nonzeros for row range of csr sparse block | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java",
"diff": "@@ -1144,7 +1144,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\nif( sparse && sparseBlock!=null ) //SPARSE (max long)\n{\n//note: rlen might be <= sparseBlock.numRows()\n- nonZeros = sparseBlock.size(0, rlen);\n+ nonZeros = sparseBlock.size(0, sparseBlock.numRows());\n}\nelse if( !sparse && denseBlock!=null ) //DENSE (max int)\n{\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX] Fix recompute nonzeros for row range of csr sparse block |
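The one-line fix matters when the logical row count (rlen) is smaller than the number of physically allocated sparse rows, as the diff's comment notes; counting non-zeros must then cover the block's own rows. A toy illustration with plain arrays, not SystemML's SparseBlock API:

```java
public class NnzRangeDemo {
    public static void main(String[] args) {
        // column indexes per physical sparse row
        int[][] rows = { {0, 3}, {1}, {}, {2} };
        int rlen = 3; // logical row count tracked by the enclosing block
        long truncated = 0, full = 0;
        for (int i = 0; i < rlen; i++)        // stops short of physical row 3
            truncated += rows[i].length;
        for (int i = 0; i < rows.length; i++) // covers all physical rows
            full += rows[i].length;
        System.out.println(truncated + " vs " + full); // 3 vs 4
    }
}
```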
49,717 | 21.04.2017 14:25:50 | 25,200 | f73673d59383ac947111cb84787cfa4df3ca7344 | Added python script to launch systemml in standalone mode
Closes | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "bin/systemml-standalone.py",
"diff": "+#!/usr/bin/env python\n+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+import os\n+import shutil\n+import sys\n+from os.path import join, exists\n+\n+\n+# error help print\n+def print_usage_and_exit():\n+ this_script = sys.argv[0]\n+ print('Usage: ' + this_script + ' <dml-filename> [arguments]')\n+ sys.exit(1)\n+\n+\n+# from http://stackoverflow.com/questions/1724693/find-a-file-in-python\n+def find_file(name, path):\n+ for root, dirs, files in os.walk(path):\n+ if name in files:\n+ return join(root, name)\n+ return None\n+\n+\n+if len(sys.argv) < 2:\n+ print('Wrong usage')\n+ print_usage_and_exit()\n+\n+\n+# find the systemML root path which contains the bin folder, the script folder and the target folder\n+# tolerate path with spaces\n+script_dir = os.path.dirname(os.path.realpath(__file__))\n+project_root_dir = os.path.dirname(script_dir)\n+user_dir = os.getcwd()\n+\n+scripts_dir = join(project_root_dir, 'scripts')\n+build_dir = join(project_root_dir, 'target')\n+lib_dir = join(build_dir, 'lib')\n+dml_script_class = join(build_dir, 'classes', 'org', 'apache', 'sysml', 'api', 'DMLScript.class')\n+hadoop_home = join(lib_dir, 'hadoop')\n+\n+\n+build_err_msg = 'You must build the project before running this script.'\n+build_dir_err_msg = 'Could not find target directory ' + build_dir + '. ' + build_err_msg\n+\n+lib_dir_err_msg = 'Could not find required libraries.' + build_err_msg\n+dml_script_err_msg = 'Could not find ' + dml_script_class + '. ' + build_err_msg\n+\n+# check if the project had been built and the jar files exist\n+if not(exists(build_dir)):\n+ print(build_dir_err_msg)\n+ sys.exit(1)\n+if not(exists(lib_dir)):\n+ print(lib_dir_err_msg)\n+ sys.exit(1)\n+if not(exists(dml_script_class)):\n+ print(dml_script_err_msg)\n+ sys.exit(1)\n+\n+print('================================================================================')\n+\n+\n+# if the present working directory is the project root or bin folder, then use the temp folder as user.dir\n+if user_dir == project_root_dir or user_dir == join(project_root_dir, 'bin'):\n+ user_dir = join(project_root_dir, 'temp')\n+ print('Output dir: ' + user_dir)\n+\n+# if the SystemML-config.xml does not exist, create it from the template\n+systemml_config_path = join(project_root_dir, 'conf', 'SystemML-config.xml')\n+systemml_template_config_path = join(project_root_dir, 'conf', 'SystemML-config.xml.template')\n+if not(exists(systemml_config_path)):\n+ shutil.copyfile(systemml_template_config_path, systemml_config_path)\n+ print('... 
created ' + systemml_config_path)\n+\n+# if the log4j.properties do not exist, create them from the template\n+log4j_properties_path = join(project_root_dir, 'conf', 'log4j.properties')\n+log4j_template_properties_path = join(project_root_dir, 'conf', 'log4j.properties.template')\n+if not(exists(log4j_properties_path)):\n+ shutil.copyfile(log4j_template_properties_path, log4j_properties_path)\n+ print('... created ' + log4j_properties_path)\n+\n+\n+script_file = sys.argv[1]\n+\n+# if the script file path was omitted, try to complete the script path\n+if not(exists(script_file)):\n+ script_file_name = os.path.abspath(script_file)\n+ script_file_found = find_file(script_file, scripts_dir)\n+ if script_file_found is None:\n+ print('Could not find DML script: ' + script_file)\n+ print_usage_and_exit()\n+ else:\n+ script_file = script_file_found\n+ print('DML Script:' + script_file)\n+\n+# add libraries which were generated by the build to the classpath\n+systemml_jar = join(build_dir, 'classes')\n+\n+\n+# For the *nix and windows, os.pathsep works reliably\n+# however for cygwin, the pathsep is set for *nix, which is ':'\n+# but the underlying java, which is a windows program requires ';'\n+# also all arguments passed to the JVM need to be converted to windows style\n+# if in the cygwin environment\n+lib_dir_star = join(lib_dir, '*')\n+if sys.platform == 'cygwin':\n+ classpath_sep = os.pathsep\n+ classpath_sep = ';'\n+ lib_dir = os.popen('cygpath -pw ' + lib_dir).read().strip()\n+ lib_dir_star = '\"' + lib_dir + \"\\*\" + '\"'\n+ systemml_jar = '\"' + os.popen('cygpath -pw ' + systemml_jar).read().strip() + '\"'\n+ hadoop_home = '\"' + os.popen('cygpath -pw ' + hadoop_home).read().strip() + '\"'\n+ log4j_properties_path = '\"' + os.popen('cygpath -pw ' + log4j_properties_path).read().strip() + '\"'\n+ user_dir = '\"' + os.popen('cygpath -pw ' + user_dir).read().strip() + '\"'\n+ script_file = '\"' + os.popen('cygpath -pw ' + script_file).read().strip() + '\"'\n+ systemml_config_path = '\"' + os.popen('cygpath -pw ' + systemml_config_path).read().strip() + '\"'\n+ classpath = lib_dir_star + '\\\\' + classpath_sep + systemml_jar\n+else:\n+ #classpath = '\"' + lib_dir_star + '\"' + os.pathsep + '\"' + systemml_jar + '\"'\n+ classpath = lib_dir_star + os.pathsep + systemml_jar\n+\n+\n+# Set the HADOOP_HOME environment variable\n+if 'HADOOP_HOME' not in os.environ:\n+ os.environ['HADOOP_HOME'] = hadoop_home\n+\n+\n+print('================================================================================')\n+\n+# Set default Java options\n+systemml_default_java_opts = \\\n+ '-Xmx8g -Xms4g -Xmn1g ' + \\\n+ '-cp ' + classpath + ' ' + \\\n+ '-Dlog4j.configuration=file:' + log4j_properties_path + ' ' \\\n+ '-Duser.dir=' + user_dir\n+\n+\n+# Reads in key-value pairs from the conf/systemml-env.sh file\n+def parse_env_file(env_file_path):\n+ env_vars = {}\n+ with open(env_file_path) as f:\n+ for l in f:\n+ l = l.strip()\n+ if l and not(l.startswith('#')) and '=' in l:\n+ k, v = l.split('=', 1)\n+ k = k.strip()\n+ v = v.strip()\n+ if len(v) > 0:\n+ # strip quotes\n+ if v[0] == v[len(v) - 1] and v[0] in ['\"', \"'\"]:\n+ v = v[1:-1]\n+\n+ env_vars[k] = v\n+ return env_vars\n+\n+\n+# Add any custom Java options set by the user at command line, overriding defaults as necessary.\n+if 'SYSTEMML_JAVA_OPTS' in os.environ:\n+ systemml_java_opts = os.environ['SYSTEMML_JAVA_OPTS']\n+ systemml_default_java_opts = systemml_default_java_opts + ' ' + systemml_java_opts\n+ # del os.environ['SYSTEMML_JAVA_OPTS']\n+\n+# Add 
any custom Java options set by the user in the environment variables file,\n+# overriding defaults as necessary.\n+systemml_env_path = join(project_root_dir, 'conf', 'systemml-env.sh')\n+if exists(systemml_env_path):\n+ env_vars = parse_env_file(systemml_env_path)\n+ os.environ.update(env_vars)\n+\n+\n+# Invoke the jar with options and arguments\n+cmd = ['java', systemml_default_java_opts, 'org.apache.sysml.api.DMLScript', '-f', script_file, '-exec singlenode', '-config', systemml_config_path] + sys.argv[2:]\n+# For debugging\n+# print(' '.join(cmd))\n+\n+return_code = os.system(' '.join(cmd))\n+\n+if return_code != 0:\n+ print('Failed to run SystemML. Exit code :' + str(return_code))\n+ print(' '.join(cmd))\n"
}
] | Java | Apache License 2.0 | apache/systemds | Added python script to launch systemml in standalone mode
Closes #461 |
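For reference, the launcher ultimately shells out to a single JVM invocation of the form `java -Xmx8g -Xms4g -Xmn1g -cp <lib/*:classes> -Dlog4j.configuration=file:<log4j.properties> -Duser.dir=<output-dir> org.apache.sysml.api.DMLScript -f <script.dml> -exec singlenode -config <SystemML-config.xml> [script arguments]`, as assembled in the `cmd` list at the end of the script.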
49,736 | 21.04.2017 13:58:22 | 28,800 | 9ed27ad6066a143a0e5ac5ccb800c7ca20e81ceb | Support GPU via Python MLContext API | [
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemml/mlcontext.py",
"new_path": "src/main/python/systemml/mlcontext.py",
"diff": "@@ -351,6 +351,17 @@ class MLContext(object):\nself._ml.setStatistics(bool(statistics))\nreturn self\n+ def setGPU(self, enable):\n+ \"\"\"\n+ Whether or not to enable GPU.\n+\n+ Parameters\n+ ----------\n+ enable: boolean\n+ \"\"\"\n+ self._ml.setGPU(bool(enable))\n+ return self\n+\ndef setStatisticsMaxHeavyHitters(self, maxHeavyHitters):\n\"\"\"\nThe maximum number of heavy hitters that are printed as part of the statistics.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1552] Support GPU via Python MLContext API |
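The Python method simply delegates to `setGPU` on the wrapped Java MLContext object. A hedged sketch of the equivalent call on the Java side; it assumes a GPU-enabled SystemML build plus Spark on the classpath, and the tiny DML script is illustrative only:

```java
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.sysml.api.mlcontext.MLContext;
import org.apache.sysml.api.mlcontext.Script;
import static org.apache.sysml.api.mlcontext.ScriptFactory.dml;

public class GpuMLContextExample {
    public static void main(String[] args) {
        JavaSparkContext sc = new JavaSparkContext(new SparkConf().setAppName("gpu-demo"));
        MLContext ml = new MLContext(sc);
        ml.setGPU(true);        // mirrors Python's MLContext.setGPU(enable)
        ml.setStatistics(true); // same delegation pattern as setStatistics
        Script s = dml("print(sum(matrix(1, rows=10, cols=10)));");
        ml.execute(s);
    }
}
```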
49,772 | 21.04.2017 18:17:41 | 25,200 | 63e28a37bde5399ed27c58cf09b990290e74e8e4 | [MINOR] Code style updates for `nn`
This commit simply updates the `nn` test suite to make use of the fact
that DML no longer requires dummy variable assignment for functions that
do not return values. | [
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/nn/test/run_tests.dml",
"new_path": "scripts/staging/SystemML-NN/nn/test/run_tests.dml",
"diff": "@@ -30,37 +30,37 @@ print(\"Starting grad checks.\")\nprint(\"---\")\n# Loss & loss-related functions\n-tmp = grad_check::cross_entropy_loss()\n-tmp = grad_check::l1_loss()\n-tmp = grad_check::l1_reg()\n-tmp = grad_check::l2_loss()\n-tmp = grad_check::l2_reg()\n-tmp = grad_check::log_loss()\n+grad_check::cross_entropy_loss()\n+grad_check::l1_loss()\n+grad_check::l1_reg()\n+grad_check::l2_loss()\n+grad_check::l2_reg()\n+grad_check::log_loss()\nprint(\"\")\n# Core layers\n-tmp = grad_check::affine()\n-tmp = grad_check::batch_norm1d()\n-tmp = grad_check::batch_norm2d()\n-tmp = grad_check::conv2d()\n-tmp = grad_check::conv2d_builtin()\n-tmp = grad_check::conv2d_simple()\n-tmp = grad_check::dropout()\n-tmp = grad_check::lstm()\n-tmp = grad_check::max_pool2d()\n-tmp = grad_check::max_pool2d_builtin()\n-tmp = grad_check::max_pool2d_simple()\n-tmp = grad_check::relu()\n-tmp = grad_check::rnn()\n-tmp = grad_check::scale_shift1d()\n-tmp = grad_check::scale_shift2d()\n-tmp = grad_check::sigmoid()\n-tmp = grad_check::softmax()\n-tmp = grad_check::tanh()\n+grad_check::affine()\n+grad_check::batch_norm1d()\n+grad_check::batch_norm2d()\n+grad_check::conv2d()\n+grad_check::conv2d_builtin()\n+grad_check::conv2d_simple()\n+grad_check::dropout()\n+grad_check::lstm()\n+grad_check::max_pool2d()\n+grad_check::max_pool2d_builtin()\n+grad_check::max_pool2d_simple()\n+grad_check::relu()\n+grad_check::rnn()\n+grad_check::scale_shift1d()\n+grad_check::scale_shift2d()\n+grad_check::sigmoid()\n+grad_check::softmax()\n+grad_check::tanh()\nprint(\"\")\n# Example model\n-tmp = grad_check::two_layer_affine_l2_net()\n+grad_check::two_layer_affine_l2_net()\nprint(\"\")\nprint(\"---\")\n@@ -74,14 +74,14 @@ print(\"\")\nprint(\"Starting other tests.\")\nprint(\"---\")\n-tmp = test::batch_norm1d()\n-tmp = test::batch_norm2d()\n-tmp = test::conv2d()\n-tmp = test::cross_entropy_loss()\n-tmp = test::im2col()\n-tmp = test::max_pool2d()\n-tmp = test::padding()\n-tmp = test::tanh()\n+test::batch_norm1d()\n+test::batch_norm2d()\n+test::conv2d()\n+test::cross_entropy_loss()\n+test::im2col()\n+test::max_pool2d()\n+test::padding()\n+test::tanh()\nprint(\"---\")\nprint(\"Other tests complete -- look for any ERRORs or WARNINGs.\")\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/nn/test/test.dml",
"new_path": "scripts/staging/SystemML-NN/nn/test/test.dml",
"diff": "@@ -515,8 +515,8 @@ batch_norm2d = function() {\n0.0294075 0.65676796 -1.53899395 -1.46057391 -0.71558321\n0.61755812 1.36254871 0.18624771 1.36254871 -0.48032296\n-0.71558321 -0.59795308 -1.30373383 1.28412855 -0.63716316\n- 0.18624771 0.30387771 0.06861746 -1.97030437 1.91148913\", rows=1,\n- cols=N*C*Hin*Win)\n+ 0.18624771 0.30387771 0.06861746 -1.97030437 1.91148913\",\n+ rows=1, cols=N*C*Hin*Win)\nout = matrix(out, rows=1, cols=N*C*Hin*Win)\nfor (i in 1:length(out)) {\nrel_error = test_util::check_rel_error(as.scalar(out[1,i]),\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Code style updates for `nn`
This commit simply updates the `nn` test suite to make use of the fact
that DML no longer requires dummy variable assignment for functions that
do not return values. |
49,717 | 22.04.2017 00:55:50 | 25,200 | b481324d06429d3435fcd25a78aef971e5498b6c | [HOTFIX] for missing apache license in CSRPointer | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\npackage org.apache.sysml.runtime.instructions.gpu.context;\nimport static jcuda.jcusparse.JCusparse.cusparseCreateMatDescr;\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX] for missing apache license in CSRPointer |
49,738 | 22.04.2017 22:29:40 | 25,200 | edd3491fa07e8897b2f119f438b88e8bf769f100 | [MINOR] Remove unused tests and test utility functions | [
{
"change_type": "DELETE",
"old_path": "src/test/java/org/apache/sysml/test/integration/AutomatedScalabilityTestBase.java",
"new_path": null,
"diff": "-/*\n- * Licensed to the Apache Software Foundation (ASF) under one\n- * or more contributor license agreements. See the NOTICE file\n- * distributed with this work for additional information\n- * regarding copyright ownership. The ASF licenses this file\n- * to you under the Apache License, Version 2.0 (the\n- * \"License\"); you may not use this file except in compliance\n- * with the License. You may obtain a copy of the License at\n- *\n- * http://www.apache.org/licenses/LICENSE-2.0\n- *\n- * Unless required by applicable law or agreed to in writing,\n- * software distributed under the License is distributed on an\n- * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n- * KIND, either express or implied. See the License for the\n- * specific language governing permissions and limitations\n- * under the License.\n- */\n-\n-package org.apache.sysml.test.integration;\n-\n-import java.util.ArrayList;\n-\n-import org.junit.After;\n-import org.junit.Before;\n-\n-import org.apache.sysml.test.utils.TestUtils;\n-\n-\n-public abstract class AutomatedScalabilityTestBase extends AutomatedTestBase\n-{\n-\n-\n- private static final boolean RUN_SCALABILITY_TESTS = false;\n-\n- private long[] timeMeasurements;\n- protected int[][] matrixSizes;\n- protected ArrayList<TestMatrixCharacteristics> inputMatrices;\n-\n-\n- @Before\n- public void setUpScalabilityTest() {\n- inputMatrices = new ArrayList<TestMatrixCharacteristics>();\n- }\n-\n- public abstract void setUp();\n-\n- protected void runTest() {\n- if(!RUN_SCALABILITY_TESTS)\n- return;\n-\n- timeMeasurements = new long[matrixSizes.length];\n- for(int i = 0; i < matrixSizes.length; i++) {\n- for(TestMatrixCharacteristics inputMatrix : inputMatrices) {\n- if(inputMatrix.getRows() == -1)\n- inputMatrix.setRows(matrixSizes[i][inputMatrix.getRowsIndexInMatrixSizes()]);\n- if(inputMatrix.getCols() == -1)\n- inputMatrix.setCols(matrixSizes[i][inputMatrix.getColsIndexInMatrixSizes()]);\n- createRandomMatrix(inputMatrix);\n- }\n-\n- for(int j = 0; j < matrixSizes[i].length; j++) {\n- testVariables.put(Integer.toString(j), Integer.toString(matrixSizes[i][j]));\n- }\n-\n- long startingTime = System.currentTimeMillis();\n- super.runTest();\n- long finishingTime = System.currentTimeMillis();\n- timeMeasurements[i] = (finishingTime - startingTime);\n-\n- TestUtils.renameTempDMLScript(baseDirectory + selectedTest + \".dml\");\n- }\n- }\n-\n- protected TestMatrixCharacteristics addInputMatrix(String name, int rows, int cols, double min, double max,\n- double sparsity, long seed) {\n- TestMatrixCharacteristics inputMatrix = new TestMatrixCharacteristics(name, rows, cols, min, max,\n- sparsity, seed);\n- inputMatrices.add(inputMatrix);\n- return inputMatrix;\n- }\n-\n- @After\n- public void displayTimeMeasurements() {\n- if(!RUN_SCALABILITY_TESTS)\n- return;\n-\n- for(long timeMeasurement : timeMeasurements) {\n- System.out.println(\"measured time: \" + timeMeasurement);\n- }\n- }\n-\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/AutomatedTestBase.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/AutomatedTestBase.java",
"diff": "@@ -479,20 +479,6 @@ public abstract class AutomatedTestBase\nreturn matrix;\n}\n- /**\n- * <p>\n- * Generates a random matrix with the specified characteristics and writes\n- * it to a file.\n- * </p>\n- *\n- * @param matrix\n- * matrix characteristics\n- */\n- protected void createRandomMatrix(TestMatrixCharacteristics matrix) {\n- createRandomMatrix(matrix.getMatrixName(), matrix.getRows(), matrix.getCols(), matrix.getMinValue(), matrix\n- .getMaxValue(), matrix.getSparsity(), matrix.getSeed());\n- }\n-\nprivate void cleanupExistingData(String fname, boolean cleanupRData) throws IOException {\nMapReduceTool.deleteFileIfExistOnHDFS(fname);\nMapReduceTool.deleteFileIfExistOnHDFS(fname + \".mtd\");\n"
},
{
"change_type": "DELETE",
"old_path": "src/test/java/org/apache/sysml/test/integration/BinaryMatrixCharacteristics.java",
"new_path": null,
"diff": "-/*\n- * Licensed to the Apache Software Foundation (ASF) under one\n- * or more contributor license agreements. See the NOTICE file\n- * distributed with this work for additional information\n- * regarding copyright ownership. The ASF licenses this file\n- * to you under the Apache License, Version 2.0 (the\n- * \"License\"); you may not use this file except in compliance\n- * with the License. You may obtain a copy of the License at\n- *\n- * http://www.apache.org/licenses/LICENSE-2.0\n- *\n- * Unless required by applicable law or agreed to in writing,\n- * software distributed under the License is distributed on an\n- * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n- * KIND, either express or implied. See the License for the\n- * specific language governing permissions and limitations\n- * under the License.\n- */\n-\n-package org.apache.sysml.test.integration;\n-\n-\n-/**\n- * <p>Contains characteristics about a binary matrix.</p>\n- *\n- *\n- */\n-public class BinaryMatrixCharacteristics\n-{\n-\n-\n- private double[][] values;\n- private int rows;\n- private int cols;\n- private int rowsInBlock;\n- private int rowsInLastBlock;\n- private int colsInBlock;\n- private int colsInLastBlock;\n- private long nonZeros;\n-\n-\n- public BinaryMatrixCharacteristics(double[][] values, int rows, int cols, int rowsInBlock, int rowsInLastBlock,\n- int colsInBlock, int colsInLastBlock, long nonZeros) {\n- this.values = values;\n- this.rows = rows;\n- this.cols = cols;\n- this.rowsInBlock = rowsInBlock;\n- this.rowsInLastBlock = rowsInLastBlock;\n- this.colsInBlock = colsInBlock;\n- this.colsInLastBlock = colsInLastBlock;\n- this.nonZeros = nonZeros;\n- }\n-\n- public double[][] getValues() {\n- return values;\n- }\n-\n- public int getRows() {\n- return rows;\n- }\n-\n- public int getCols() {\n- return cols;\n- }\n-\n- public int getRowsInBlock() {\n- return rowsInBlock;\n- }\n-\n- public int getRowsInLastBlock() {\n- return rowsInLastBlock;\n- }\n-\n- public int getColsInBlock() {\n- return colsInBlock;\n- }\n-\n- public int getColsInLastBlock() {\n- return colsInLastBlock;\n- }\n-\n- public long getNonZeros() {\n- return nonZeros;\n- }\n-\n-}\n"
},
{
"change_type": "DELETE",
"old_path": "src/test/java/org/apache/sysml/test/integration/TestMatrixCharacteristics.java",
"new_path": null,
"diff": "-/*\n- * Licensed to the Apache Software Foundation (ASF) under one\n- * or more contributor license agreements. See the NOTICE file\n- * distributed with this work for additional information\n- * regarding copyright ownership. The ASF licenses this file\n- * to you under the Apache License, Version 2.0 (the\n- * \"License\"); you may not use this file except in compliance\n- * with the License. You may obtain a copy of the License at\n- *\n- * http://www.apache.org/licenses/LICENSE-2.0\n- *\n- * Unless required by applicable law or agreed to in writing,\n- * software distributed under the License is distributed on an\n- * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n- * KIND, either express or implied. See the License for the\n- * specific language governing permissions and limitations\n- * under the License.\n- */\n-\n-package org.apache.sysml.test.integration;\n-\n-public class TestMatrixCharacteristics\n-{\n-\n-\n- private String matrixName;\n- private int rows;\n- private int rowsIndexInMatrixSizes;\n- private int cols;\n- private int colsIndexInMatrixSizes;\n- private double minValue;\n- private double maxValue;\n- private double sparsity;\n- private long seed;\n-\n-\n- public TestMatrixCharacteristics(String matrixName, int rows, int cols, double minValue, double maxValue,\n- double sparsity, long seed) {\n- this.matrixName = matrixName;\n- this.rows = rows;\n- this.cols = cols;\n- this.minValue = minValue;\n- this.maxValue = maxValue;\n- this.sparsity = sparsity;\n- this.seed = seed;\n- }\n-\n- public String getMatrixName() {\n- return matrixName;\n- }\n-\n- public int getRows() {\n- return rows;\n- }\n-\n- public void setRows(int rows) {\n- this.rows = rows;\n- }\n-\n- public int getRowsIndexInMatrixSizes() {\n- return rowsIndexInMatrixSizes;\n- }\n-\n- public TestMatrixCharacteristics setRowsIndexInMatrixSizes(int rowsIndexInMatrixSizes) {\n- this.rowsIndexInMatrixSizes = rowsIndexInMatrixSizes;\n- return this;\n- }\n-\n- public int getCols() {\n- return cols;\n- }\n-\n- public void setCols(int cols) {\n- this.cols = cols;\n- }\n-\n- public int getColsIndexInMatrixSizes() {\n- return colsIndexInMatrixSizes;\n- }\n-\n- public TestMatrixCharacteristics setColsIndexInMatrixSizes(int colsIndexInMatrixSizes) {\n- this.colsIndexInMatrixSizes = colsIndexInMatrixSizes;\n- return this;\n- }\n-\n- public double getMinValue() {\n- return minValue;\n- }\n-\n- public double getMaxValue() {\n- return maxValue;\n- }\n-\n- public double getSparsity() {\n- return sparsity;\n- }\n-\n- public long getSeed() {\n- return seed;\n- }\n-\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/data/WriteTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/data/WriteTest.java",
"diff": "package org.apache.sysml.test.integration.functions.data;\n-import static org.junit.Assert.*;\n-\nimport org.junit.Test;\n-\n+import org.apache.sysml.runtime.io.MatrixReader;\n+import org.apache.sysml.runtime.io.MatrixReaderFactory;\nimport org.apache.sysml.runtime.matrix.MatrixCharacteristics;\n-import org.apache.sysml.test.integration.BinaryMatrixCharacteristics;\n+import org.apache.sysml.runtime.matrix.data.InputInfo;\n+import org.apache.sysml.runtime.matrix.data.MatrixBlock;\n+import org.apache.sysml.runtime.util.DataConverter;\nimport org.apache.sysml.test.integration.AutomatedTestBase;\nimport org.apache.sysml.test.integration.TestConfiguration;\nimport org.apache.sysml.test.utils.TestUtils;\n@@ -96,16 +97,15 @@ public class WriteTest extends AutomatedTestBase\nrunTest();\n- //compareResults();\n-\n- BinaryMatrixCharacteristics matrix = TestUtils.readBlocksFromSequenceFile(output(\"a\"),1000,1000);\n- assertEquals(rows, matrix.getRows());\n- assertEquals(cols, matrix.getCols());\n- double[][] matrixValues = matrix.getValues();\n- for(int i = 0; i < rows; i++) {\n- for(int j = 0; j < cols; j++) {\n- assertEquals(i + \",\" + j, a[i][j], matrixValues[i][j], 0);\n+ //read and compare output matrix\n+ try {\n+ MatrixReader reader = MatrixReaderFactory.createMatrixReader(InputInfo.BinaryBlockInputInfo);\n+ MatrixBlock mb = reader.readMatrixFromHDFS(output(\"a\"), rows, cols, 1000, 1000, -1);\n+ checkDMLMetaDataFile(\"a\", new MatrixCharacteristics(rows,cols,1000,1000));\n+ TestUtils.compareMatrices(a, DataConverter.convertToDoubleMatrix(mb), rows, cols, 0);\n}\n+ catch(Exception ex) {\n+ throw new RuntimeException(ex);\n}\n}\n"
},
{
"change_type": "DELETE",
"old_path": "src/test/java/org/apache/sysml/test/integration/scalability/LinearRegressionTest.java",
"new_path": null,
"diff": "-/*\n- * Licensed to the Apache Software Foundation (ASF) under one\n- * or more contributor license agreements. See the NOTICE file\n- * distributed with this work for additional information\n- * regarding copyright ownership. The ASF licenses this file\n- * to you under the Apache License, Version 2.0 (the\n- * \"License\"); you may not use this file except in compliance\n- * with the License. You may obtain a copy of the License at\n- *\n- * http://www.apache.org/licenses/LICENSE-2.0\n- *\n- * Unless required by applicable law or agreed to in writing,\n- * software distributed under the License is distributed on an\n- * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n- * KIND, either express or implied. See the License for the\n- * specific language governing permissions and limitations\n- * under the License.\n- */\n-\n-package org.apache.sysml.test.integration.scalability;\n-\n-import org.junit.Test;\n-\n-import org.apache.sysml.test.integration.AutomatedScalabilityTestBase;\n-import org.apache.sysml.test.integration.TestConfiguration;\n-import org.apache.sysml.test.utils.TestUtils;\n-\n-\n-\n-public class LinearRegressionTest extends AutomatedScalabilityTestBase\n-{\n-\n- private static final String TEST_DIR = \"test/scripts/scalability/linear_regression/\";\n- private static final String TEST_CLASS_DIR = TEST_DIR + LinearRegressionTest.class.getSimpleName() + \"/\";\n-\n-\n- @Override\n- public void setUp() {\n- TestUtils.clearAssertionInformation();\n- addTestConfiguration(\"LinearRegressionTest\", new TestConfiguration(TEST_CLASS_DIR, \"LinearRegressionTest\", new String[] { \"w\" }));\n- matrixSizes = new int[][] {\n- { 19004, 15436 }\n- };\n- }\n-\n- @Test\n- public void testLinearRegression() {\n- TestConfiguration config = getTestConfiguration(\"LinearRegressionTest\");\n- loadTestConfiguration(config);\n-\n- addInputMatrix(\"g\", -1, -1, 0, 1, 0.00594116, -1).setRowsIndexInMatrixSizes(0).setColsIndexInMatrixSizes(1);\n- addInputMatrix(\"b\", -1, 1, 1, 10, 1, -1).setRowsIndexInMatrixSizes(0);\n-\n- runTest();\n- }\n-\n-}\n"
},
{
"change_type": "DELETE",
"old_path": "src/test/java/org/apache/sysml/test/integration/scalability/PageRankTest.java",
"new_path": null,
"diff": "-/*\n- * Licensed to the Apache Software Foundation (ASF) under one\n- * or more contributor license agreements. See the NOTICE file\n- * distributed with this work for additional information\n- * regarding copyright ownership. The ASF licenses this file\n- * to you under the Apache License, Version 2.0 (the\n- * \"License\"); you may not use this file except in compliance\n- * with the License. You may obtain a copy of the License at\n- *\n- * http://www.apache.org/licenses/LICENSE-2.0\n- *\n- * Unless required by applicable law or agreed to in writing,\n- * software distributed under the License is distributed on an\n- * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n- * KIND, either express or implied. See the License for the\n- * specific language governing permissions and limitations\n- * under the License.\n- */\n-\n-package org.apache.sysml.test.integration.scalability;\n-\n-import org.junit.Test;\n-\n-import org.apache.sysml.test.integration.AutomatedScalabilityTestBase;\n-import org.apache.sysml.test.integration.TestConfiguration;\n-\n-\n-\n-public class PageRankTest extends AutomatedScalabilityTestBase\n-{\n- private static final String TEST_DIR = \"test/scripts/scalability/page_rank/\";\n- private static final String TEST_CLASS_DIR = TEST_DIR + PageRankTest.class.getSimpleName() + \"/\";\n-\n-\n- @Override\n- public void setUp() {\n- addTestConfiguration(\"PageRankTest\", new TestConfiguration(TEST_CLASS_DIR, \"PageRankTest\", new String[] { \"p\" }));\n- matrixSizes = new int[][] {\n- { 9914 }\n- };\n- }\n-\n- @Test\n- public void testPageRank() {\n- TestConfiguration config = getTestConfiguration(\"PageRankTest\");\n- loadTestConfiguration(config);\n-\n- addInputMatrix(\"g\", -1, -1, 1, 1, 0.000374962, -1).setRowsIndexInMatrixSizes(0).setColsIndexInMatrixSizes(0);\n- addInputMatrix(\"p\", -1, 1, 1, 1, 1, -1).setRowsIndexInMatrixSizes(0);\n- addInputMatrix(\"e\", -1, 1, 1, 1, 1, -1).setRowsIndexInMatrixSizes(0);\n- addInputMatrix(\"u\", 1, -1, 1, 1, 1, -1).setColsIndexInMatrixSizes(0);\n-\n- runTest();\n- }\n-\n-}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/utils/TestUtils.java",
"new_path": "src/test/java/org/apache/sysml/test/utils/TestUtils.java",
"diff": "@@ -40,7 +40,6 @@ import java.text.NumberFormat;\nimport java.util.ArrayList;\nimport java.util.Collections;\nimport java.util.HashMap;\n-import java.util.Iterator;\nimport java.util.Locale;\nimport java.util.Random;\nimport java.util.StringTokenizer;\n@@ -58,8 +57,8 @@ import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.io.FrameWriter;\nimport org.apache.sysml.runtime.io.FrameWriterFactory;\nimport org.apache.sysml.runtime.io.IOUtilFunctions;\n+import org.apache.sysml.runtime.matrix.MatrixCharacteristics;\nimport org.apache.sysml.runtime.matrix.data.FrameBlock;\n-import org.apache.sysml.runtime.matrix.data.IJV;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.MatrixCell;\nimport org.apache.sysml.runtime.matrix.data.MatrixIndexes;\n@@ -67,7 +66,6 @@ import org.apache.sysml.runtime.matrix.data.OutputInfo;\nimport org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\nimport org.apache.sysml.runtime.util.UtilFunctions;\nimport org.apache.sysml.test.integration.AutomatedTestBase;\n-import org.apache.sysml.test.integration.BinaryMatrixCharacteristics;\n/**\n@@ -1048,8 +1046,10 @@ public class TestUtils\n* Checks a matrix against a number of specifications.\n* </p>\n*\n- * @param matrix\n- * matrix\n+ * @param data\n+ * matrix data\n+ * @param mc\n+ * matrix characteristics\n* @param rows\n* number of rows\n* @param cols\n@@ -1059,14 +1059,13 @@ public class TestUtils\n* @param max\n* maximum value\n*/\n- public static void checkMatrix(BinaryMatrixCharacteristics matrix, long rows, long cols, double min, double max) {\n- assertEquals(rows, matrix.getRows());\n- assertEquals(cols, matrix.getCols());\n- double[][] matrixValues = matrix.getValues();\n+ public static void checkMatrix(double[][] data, MatrixCharacteristics mc, long rows, long cols, double min, double max) {\n+ assertEquals(rows, mc.getRows());\n+ assertEquals(cols, mc.getCols());\nfor (int i = 0; i < rows; i++) {\nfor (int j = 0; j < cols; j++) {\nassertTrue(\"invalid value\",\n- ((matrixValues[i][j] >= min && matrixValues[i][j] <= max) || matrixValues[i][j] == 0));\n+ ((data[i][j] >= min && data[i][j] <= max) || data[i][j] == 0));\n}\n}\n}\n@@ -1817,174 +1816,6 @@ public class TestUtils\nreturn false;\n}\n- /**\n- * <p>\n- * Reads binary cells from a file. 
A matrix characteristic is created which\n- * contains the characteristics of the matrix read from the file and the\n- * values.\n- * </p>\n- *\n- * @param directory\n- * directory containing the matrix\n- * @return matrix characteristics\n- */\n- @SuppressWarnings(\"deprecation\")\n- public static BinaryMatrixCharacteristics readCellsFromSequenceFile(String directory) {\n- try {\n- FileSystem fs = FileSystem.get(conf);\n- FileStatus[] files = fs.listStatus(new Path(directory));\n-\n- HashMap<MatrixIndexes, Double> valueMap = new HashMap<MatrixIndexes, Double>();\n- int rows = 0;\n- int cols = 0;\n- MatrixIndexes indexes = new MatrixIndexes();\n- MatrixCell value = new MatrixCell();\n- for (FileStatus file : files) {\n- SequenceFile.Reader reader = null;\n- try {\n- reader = new SequenceFile.Reader(FileSystem.get(conf), file.getPath(), conf);\n- while (reader.next(indexes, value)) {\n- if (rows < indexes.getRowIndex())\n- rows = (int) indexes.getRowIndex();\n- if (cols < indexes.getColumnIndex())\n- cols = (int) indexes.getColumnIndex();\n- valueMap.put(new MatrixIndexes(indexes), value.getValue());\n- }\n- }\n- finally {\n- IOUtilFunctions.closeSilently(reader);\n- }\n- }\n-\n- double[][] values = new double[rows][cols];\n- long nonZeros = 0;\n- for (MatrixIndexes index : valueMap.keySet()) {\n- values[(int)index.getRowIndex() - 1][(int)index.getColumnIndex() - 1] = valueMap.get(index);\n- if (valueMap.get(index) != 0)\n- nonZeros++;\n- }\n-\n- return new BinaryMatrixCharacteristics(values, rows, cols, 0, 0, 0, 0, nonZeros);\n- } catch (IOException e) {\n- e.printStackTrace();\n- fail(\"unable to read sequence file in \" + directory);\n- }\n-\n- return null;\n- }\n-\n- /**\n- * <p>\n- * Reads binary blocks from a file. A matrix characteristic is created which\n- * contains the characteristics of the matrix read from the file and the\n- * values.\n- * </p>\n- *\n- * @param directory\n- * directory containing the matrix\n- * @param rowsInBlock\n- * rows in block\n- * @param colsInBlock\n- * columns in block\n- * @return matrix characteristics\n- */\n- @SuppressWarnings(\"deprecation\")\n- public static BinaryMatrixCharacteristics readBlocksFromSequenceFile(String directory, int rowsInBlock,\n- int colsInBlock) {\n- try {\n- FileSystem fs = FileSystem.get(conf);\n- FileStatus[] files = fs.listStatus(new Path(directory));\n-\n- HashMap<MatrixIndexes, Double> valueMap = new HashMap<MatrixIndexes, Double>();\n- int rowsInLastBlock = -1;\n- int colsInLastBlock = -1;\n- int rows = 0;\n- int cols = 0;\n- MatrixIndexes indexes = new MatrixIndexes();\n- MatrixBlock value = new MatrixBlock();\n- for (FileStatus file : files) {\n- SequenceFile.Reader reader = new SequenceFile.Reader(FileSystem.get(conf), file.getPath(), conf);\n-\n- try {\n- while (reader.next(indexes, value)) {\n- if (value.getNumRows() < rowsInBlock) {\n- if (rowsInLastBlock == -1)\n- rowsInLastBlock = value.getNumRows();\n- else if (rowsInLastBlock != value.getNumRows())\n- fail(\"invalid block sizes\");\n- rows = (int) ((indexes.getRowIndex() - 1) * rowsInBlock + value.getNumRows());\n- } else if (value.getNumRows() == rowsInBlock) {\n- if (rows <= (indexes.getRowIndex() * rowsInBlock + value.getNumRows())) {\n- if (rowsInLastBlock == -1)\n- rows = (int) ((indexes.getRowIndex() - 1) * rowsInBlock + value.getNumRows());\n- else\n- fail(\"invalid block sizes\");\n- }\n- } else {\n- fail(\"invalid block sizes\");\n- }\n-\n- if (value.getNumColumns() < colsInBlock) {\n- if (colsInLastBlock == -1)\n- colsInLastBlock = 
value.getNumColumns();\n- else if (colsInLastBlock != value.getNumColumns())\n- fail(\"invalid block sizes\");\n- cols = (int) ((indexes.getColumnIndex() - 1) * colsInBlock + value.getNumColumns());\n- } else if (value.getNumColumns() == colsInBlock) {\n- if (cols <= (indexes.getColumnIndex() * colsInBlock + value.getNumColumns())) {\n- if (colsInLastBlock == -1)\n- cols = (int) ((indexes.getColumnIndex() - 1) * colsInBlock + value.getNumColumns());\n- else\n- fail(\"invalid block sizes\");\n- }\n- } else {\n- fail(\"invalid block sizes\");\n- }\n-\n- if (value.isInSparseFormat()) {\n- Iterator<IJV> iter = value.getSparseBlockIterator();\n- while( iter.hasNext() )\n- {\n- IJV cell = iter.next();\n- valueMap.put(new MatrixIndexes(((indexes.getRowIndex() - 1) * rowsInBlock + cell.getI()),\n- (int) ((indexes.getColumnIndex() - 1) * colsInBlock + cell.getJ())), cell.getV());\n- }\n-\n- } else {\n- double[] valuesInBlock = value.getDenseBlock();\n- for (int i = 0; i < value.getNumRows(); i++) {\n- for (int j = 0; j < value.getNumColumns(); j++) {\n- valueMap.put(new MatrixIndexes(((indexes.getRowIndex() - 1) * rowsInBlock + i),\n- (int) ((indexes.getColumnIndex() - 1) * colsInBlock + j)), valuesInBlock[i\n- * value.getNumColumns() + j]);\n- }\n- }\n- }\n- }\n- }\n- finally {\n- IOUtilFunctions.closeSilently(reader);\n- }\n- }\n-\n- long nonZeros = 0;\n- double[][] values = new double[rows][cols];\n- for (MatrixIndexes index : valueMap.keySet()) {\n- values[(int)index.getRowIndex()][(int)index.getColumnIndex()] = valueMap.get(index);\n- if (valueMap.get(index) != 0)\n- nonZeros++;\n- }\n-\n- return new BinaryMatrixCharacteristics(values, rows, cols, rowsInBlock, rowsInLastBlock, colsInBlock,\n- colsInLastBlock, nonZeros);\n- } catch (IOException e) {\n- e.printStackTrace();\n- fail(\"unable to read sequence file in \" + directory);\n- }\n-\n- return null;\n- }\n-\n/**\n* <p>\n* Returns the path to a file in a directory if it is the only file in the\n"
},
{
"change_type": "DELETE",
"old_path": "src/test_suites/java/org/apache/sysml/test/integration/scalability/ZPackageSuite.java",
"new_path": null,
"diff": "-/*\n- * Licensed to the Apache Software Foundation (ASF) under one\n- * or more contributor license agreements. See the NOTICE file\n- * distributed with this work for additional information\n- * regarding copyright ownership. The ASF licenses this file\n- * to you under the Apache License, Version 2.0 (the\n- * \"License\"); you may not use this file except in compliance\n- * with the License. You may obtain a copy of the License at\n- *\n- * http://www.apache.org/licenses/LICENSE-2.0\n- *\n- * Unless required by applicable law or agreed to in writing,\n- * software distributed under the License is distributed on an\n- * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n- * KIND, either express or implied. See the License for the\n- * specific language governing permissions and limitations\n- * under the License.\n- */\n-\n-package org.apache.sysml.test.integration.scalability;\n-\n-import org.junit.runner.RunWith;\n-import org.junit.runners.Suite;\n-\n-/** Group together the tests in this package into a single suite so that the Maven build\n- * won't run two of them at once. */\n-@RunWith(Suite.class)\n-@Suite.SuiteClasses({\n- LinearRegressionTest.class,\n- PageRankTest.class\n-})\n-\n-\n-/** This class is just a holder for the above JUnit annotations. */\n-public class ZPackageSuite {\n-\n-}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Remove unused tests and test utility functions |
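The deleted hand-rolled sequence-file readers are superseded by the standard reader path used in the WriteTest change above. A condensed sketch of that path; the file path, dimensions, and 1000x1000 block sizes are placeholders:

```java
import org.apache.sysml.runtime.io.MatrixReader;
import org.apache.sysml.runtime.io.MatrixReaderFactory;
import org.apache.sysml.runtime.matrix.data.InputInfo;
import org.apache.sysml.runtime.matrix.data.MatrixBlock;
import org.apache.sysml.runtime.util.DataConverter;

public class ReadBinaryBlockExample {
    public static double[][] read(String fname, long rows, long cols) throws Exception {
        MatrixReader reader = MatrixReaderFactory
            .createMatrixReader(InputInfo.BinaryBlockInputInfo);
        // -1: expected number of non-zeros unknown
        MatrixBlock mb = reader.readMatrixFromHDFS(fname, rows, cols, 1000, 1000, -1);
        return DataConverter.convertToDoubleMatrix(mb);
    }
}
```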
49,738 | 25.04.2017 15:03:49 | 25,200 | eb35b2c90cbacc674793ca4aed4583273d00fa87 | Fix rdd status handling for exports and writes, tests | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/CacheableData.java",
"new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/CacheableData.java",
"diff": "@@ -204,7 +204,7 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\n_uniqueID = (int)_seq.getNextID();\n_cacheStatus = CacheStatus.EMPTY;\n_numReadThreads = 0;\n- _gpuObjects = new HashMap<>();\n+ _gpuObjects = new HashMap<GPUContext, GPUObject>();\n}\n/**\n@@ -835,14 +835,19 @@ public abstract class CacheableData<T extends CacheBlock> extends Data\nthrow new CacheException (\"Export to \" + fName + \" failed.\", e);\n}\n}\n- else if( getRDDHandle()!=null && //pending rdd operation\n- !getRDDHandle().allowsShortCircuitRead() )\n+ else if( getRDDHandle()!=null && getRDDHandle().isPending()\n+ && !getRDDHandle().isHDFSFile()\n+ && !getRDDHandle().allowsShortCircuitRead() )\n{\n//CASE 3: pending rdd operation (other than checkpoints)\ntry\n{\n+ //write matrix or frame\nwriteBlobFromRDDtoHDFS(getRDDHandle(), fName, outputFormat);\nwriteMetaData( fName, outputFormat, formatProperties );\n+\n+ //update rdd status\n+ getRDDHandle().setPending(false);\n}\ncatch (Exception e) {\nthrow new CacheException (\"Export to \" + fName + \" failed.\", e);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/MatrixObject.java",
"new_path": "src/main/java/org/apache/sysml/runtime/controlprogram/caching/MatrixObject.java",
"diff": "@@ -488,8 +488,11 @@ public class MatrixObject extends CacheableData<MatrixBlock>\nif( !MapReduceTool.existsFileOnHDFS(_hdfsFileName) ) { //prevent overwrite existing file\nlong newnnz = SparkExecutionContext.writeRDDtoHDFS(lrdd, _hdfsFileName, iimd.getOutputInfo());\n((MatrixDimensionsMetaData) _metaData).getMatrixCharacteristics().setNonZeros(newnnz);\n+ ((RDDObject)rdd).setPending(false); //mark rdd as non-pending (for export)\n((RDDObject)rdd).setHDFSFile(true); //mark rdd as hdfs file (for restore)\nwriteStatus.setValue(true); //mark for no cache-write on read\n+ //note: the flag hdfsFile is actually not entirely correct because we still hold an rdd\n+ //reference to the input not to an rdd of the hdfs file but the resulting behavior is correct\n}\nmb = readBlobFromHDFS(_hdfsFileName);\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/data/RDDObject.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/data/RDDObject.java",
"diff": "@@ -29,7 +29,8 @@ public class RDDObject extends LineageObject\nprivate boolean _checkpointed = false; //created via checkpoint instruction\nprivate boolean _hdfsfile = false; //created from hdfs file\nprivate String _hdfsFname = null; //hdfs filename, if created from hdfs.\n- private boolean _parRDD = false;\n+ private boolean _parRDD = false; //is a parallelized rdd at driver\n+ private boolean _pending = true; //is a pending rdd operation\npublic RDDObject( JavaPairRDD<?,?> rddvar, String varName) {\nsuper(varName);\n@@ -72,6 +73,14 @@ public class RDDObject extends LineageObject\nreturn _parRDD;\n}\n+ public void setPending(boolean flag) {\n+ _pending = flag;\n+ }\n+\n+ public boolean isPending() {\n+ return _pending;\n+ }\n+\n/**\n* Indicates if rdd is an hdfs file or a checkpoint over an hdfs file;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextParforDatasetTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/mlcontext/MLContextParforDatasetTest.java",
"diff": "@@ -80,25 +80,45 @@ public class MLContextParforDatasetTest extends AutomatedTestBase\n@Test\npublic void testParforDatasetVector() {\n- runMLContextParforDatasetTest(true, false);\n+ runMLContextParforDatasetTest(true, false, false);\n}\n@Test\npublic void testParforDatasetRow() {\n- runMLContextParforDatasetTest(false, false);\n+ runMLContextParforDatasetTest(false, false, false);\n}\n@Test\npublic void testParforDatasetVectorUnkownDims() {\n- runMLContextParforDatasetTest(true, true);\n+ runMLContextParforDatasetTest(true, true, false);\n}\n@Test\npublic void testParforDatasetRowUnknownDims() {\n- runMLContextParforDatasetTest(false, true);\n+ runMLContextParforDatasetTest(false, true, false);\n}\n- private void runMLContextParforDatasetTest(boolean vector, boolean unknownDims)\n+ @Test\n+ public void testParforDatasetVectorMulti() {\n+ runMLContextParforDatasetTest(true, false, true);\n+ }\n+\n+ @Test\n+ public void testParforDatasetRowMulti() {\n+ runMLContextParforDatasetTest(false, false, true);\n+ }\n+\n+ @Test\n+ public void testParforDatasetVectorUnkownDimsMulti() {\n+ runMLContextParforDatasetTest(true, true, true);\n+ }\n+\n+ @Test\n+ public void testParforDatasetRowUnknownDimsMulti() {\n+ runMLContextParforDatasetTest(false, true, true);\n+ }\n+\n+ private void runMLContextParforDatasetTest(boolean vector, boolean unknownDims, boolean multiInputs)\n{\n//modify memory budget to trigger fused datapartition-execute\nlong oldmem = InfrastructureAnalyzer.getLocalMaxMemory();\n@@ -119,21 +139,30 @@ public class MLContextParforDatasetTest extends AutomatedTestBase\nMatrixMetadata mm = new MatrixMetadata(vector ? MatrixFormat.DF_VECTOR_WITH_INDEX : MatrixFormat.DF_DOUBLES_WITH_INDEX);\nmm.setMatrixCharacteristics(mc2);\n- String s = \"v = matrix(0, rows=nrow(X), cols=1)\"\n+ String s1 = \"v = matrix(0, rows=nrow(X), cols=1)\"\n+ \"parfor(i in 1:nrow(X), log=DEBUG) {\"\n+ \" v[i, ] = sum(X[i, ]);\"\n+ \"}\"\n+ \"r = sum(v);\";\n+ String s2 = \"v = matrix(0, rows=nrow(X), cols=1)\"\n+ +\"Y = X;\"\n+ + \"parfor(i in 1:nrow(X), log=DEBUG) {\"\n+ + \" v[i, ] = sum(X[i, ]+Y[i, ]);\"\n+ + \"}\"\n+ + \"r = sum(v);\";\n+ String s = multiInputs ? s2 : s1;\n+\nScript script = dml(s).in(\"X\", df, mm).out(\"r\");\nMLResults results = ml.execute(script);\n//compare aggregation results\ndouble sum1 = results.getDouble(\"r\");\n- double sum2 = mbA.sum();\n+ double sum2 = mbA.sum() * (multiInputs ? 2 : 1);\nTestUtils.compareScalars(sum2, sum1, 0.000001);\n}\ncatch(Exception ex) {\n+ ex.printStackTrace();\nthrow new RuntimeException(ex);\n}\nfinally {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1466] Fix rdd status handling for exports and writes, tests |
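The `_pending` flag introduced in the record above lets an RDD handle advertise whether its lazy Spark computation has been triggered yet, so that exports and writes can force evaluation exactly once. A minimal, self-contained Java sketch of how such a flag can guard an export path — all class and method names here are illustrative stand-ins, not the actual SystemML API:

```java
// Stand-in for a lineage object carrying a pending flag like RDDObject._pending.
class LineageHandle {
    private boolean pending = true; // true until the deferred computation runs

    boolean isPending() { return pending; }
    void setPending(boolean flag) { pending = flag; }
}

public class PendingExportSketch {
    // Export forces evaluation exactly once; later exports reuse the result.
    static void export(LineageHandle h, String fname) {
        if (h.isPending()) {
            // trigger the deferred computation here (e.g., via a Spark action),
            // then mark the handle materialized
            h.setPending(false);
        }
        System.out.println("writing " + fname);
    }

    public static void main(String[] args) {
        LineageHandle h = new LineageHandle();
        export(h, "out1"); // computes first, then writes
        export(h, "out2"); // already materialized, writes directly
    }
}
```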
49,772 | 26.04.2017 14:40:40 | 25,200 | 1f5cf697c49313861a3bdbcc634f7a56daabdc16 | Move `examples` into `nn` | [
{
"change_type": "MODIFY",
"old_path": "scripts/staging/SystemML-NN/README.md",
"new_path": "scripts/staging/SystemML-NN/README.md",
"diff": "@@ -22,7 +22,7 @@ limitations under the License.\n### A deep learning library for [Apache SystemML](https://github.com/apache/incubator-systemml).\n## Examples:\n-#### Please see the [`examples`](examples) folder for more detailed examples, or view the following two quick examples.\n+#### Please see the [`examples`](nn/examples) folder for more detailed examples, or view the following two quick examples.\n### Neural net for regression with vanilla SGD:\n```python\n# Imports\n"
},
{
"change_type": "DELETE",
"old_path": "scripts/staging/SystemML-NN/examples/nn",
"new_path": null,
"diff": "-../nn\n\\ No newline at end of file\n"
},
{
"change_type": "RENAME",
"old_path": "scripts/staging/SystemML-NN/examples/Example - MNIST LeNet.ipynb",
"new_path": "scripts/staging/SystemML-NN/nn/examples/Example - MNIST LeNet.ipynb",
"diff": "{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": false\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"# Create a SystemML MLContext object\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"%%sh\\n\",\n\"mkdir -p data/mnist/\\n\",\n\"cd data/mnist/\\n\",\n- \"curl -O http://pjreddie.com/media/files/mnist_train.csv\\n\",\n- \"curl -O http://pjreddie.com/media/files/mnist_test.csv\"\n+ \"curl -O https://pjreddie.com/media/files/mnist_train.csv\\n\",\n+ \"curl -O https://pjreddie.com/media/files/mnist_test.csv\"\n]\n},\n{\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": false\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"script_string = \\\"\\\"\\\"\\n\",\n- \"source(\\\"mnist_lenet.dml\\\") as mnist_lenet\\n\",\n+ \"source(\\\"nn/examples/mnist_lenet.dml\\\") as mnist_lenet\\n\",\n\"\\n\",\n\"# Read training data\\n\",\n\"data = read($data, format=\\\"csv\\\")\\n\",\n\"y_val = labels[1:5000,]\\n\",\n\"\\n\",\n\"# Train\\n\",\n- \"[W1, b1, W2, b2, W3, b3, W4, b4] = mnist_lenet::train(X, y, X_val, y_val, C, Hin, Win)\\n\",\n+ \"epochs = 10\\n\",\n+ \"[W1, b1, W2, b2, W3, b3, W4, b4] = mnist_lenet::train(X, y, X_val, y_val, C, Hin, Win, epochs)\\n\",\n\"\\\"\\\"\\\"\\n\",\n\"script = (dml(script_string).input(\\\"$data\\\", \\\"data/mnist/mnist_train.csv\\\")\\n\",\n\" .input(C=1, Hin=28, Win=28)\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": false\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"script_string = \\\"\\\"\\\"\\n\",\n- \"source(\\\"mnist_lenet.dml\\\") as mnist_lenet\\n\",\n+ \"source(\\\"nn/examples/mnist_lenet.dml\\\") as mnist_lenet\\n\",\n\"\\n\",\n\"# Read test data\\n\",\n\"data = read($data, format=\\\"csv\\\")\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": false\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"W1_df = W1.toDF()\\n\",\n],\n\"metadata\": {\n\"kernelspec\": {\n- \"display_name\": \"Python 3\",\n+ \"display_name\": \"Python 3 + Spark 2.x + SystemML\",\n\"language\": \"python\",\n- \"name\": \"python3\"\n+ \"name\": \"pyspark3_2.x\"\n},\n\"language_info\": {\n\"codemirror_mode\": {\n\"name\": \"python\",\n\"nbconvert_exporter\": \"python\",\n\"pygments_lexer\": \"ipython3\",\n- \"version\": \"3.5.2\"\n+ \"version\": \"3.6.1\"\n}\n},\n\"nbformat\": 4,\n- \"nbformat_minor\": 0\n+ \"nbformat_minor\": 1\n}\n"
},
{
"change_type": "RENAME",
"old_path": "scripts/staging/SystemML-NN/examples/Example - MNIST Softmax Classifier.ipynb",
"new_path": "scripts/staging/SystemML-NN/nn/examples/Example - MNIST Softmax Classifier.ipynb",
"diff": "\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": false,\n\"scrolled\": false\n},\n\"outputs\": [],\n\"%%sh\\n\",\n\"mkdir -p data/mnist/\\n\",\n\"cd data/mnist/\\n\",\n- \"curl -O http://pjreddie.com/media/files/mnist_train.csv\\n\",\n- \"curl -O http://pjreddie.com/media/files/mnist_test.csv\"\n+ \"curl -O https://pjreddie.com/media/files/mnist_train.csv\\n\",\n+ \"curl -O https://pjreddie.com/media/files/mnist_test.csv\"\n]\n},\n{\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": false\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"training = \\\"\\\"\\\"\\n\",\n- \"source(\\\"mnist_softmax.dml\\\") as mnist_softmax\\n\",\n+ \"source(\\\"nn/examples/mnist_softmax.dml\\\") as mnist_softmax\\n\",\n\"\\n\",\n\"# Read training data\\n\",\n\"data = read($data, format=\\\"csv\\\")\\n\",\n\"y_val = labels[1:5000,]\\n\",\n\"\\n\",\n\"# Train\\n\",\n- \"[W, b] = mnist_softmax::train(X, y, X_val, y_val)\\n\",\n+ \"epochs = 1\\n\",\n+ \"[W, b] = mnist_softmax::train(X, y, X_val, y_val, epochs)\\n\",\n\"\\\"\\\"\\\"\\n\",\n\"script = dml(training).input(\\\"$data\\\", \\\"data/mnist/mnist_train.csv\\\").output(\\\"W\\\", \\\"b\\\")\\n\",\n\"W, b = ml.execute(script).get(\\\"W\\\", \\\"b\\\")\"\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": false\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"testing = \\\"\\\"\\\"\\n\",\n- \"source(\\\"mnist_softmax.dml\\\") as mnist_softmax\\n\",\n+ \"source(\\\"nn/examples/mnist_softmax.dml\\\") as mnist_softmax\\n\",\n\"\\n\",\n\"# Read test data\\n\",\n\"data = read($data, format=\\\"csv\\\")\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": false\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"W_df = W.toDF()\\n\",\n\"name\": \"python\",\n\"nbconvert_exporter\": \"python\",\n\"pygments_lexer\": \"ipython3\",\n- \"version\": \"3.5.2\"\n+ \"version\": \"3.6.1\"\n}\n},\n\"nbformat\": 4,\n- \"nbformat_minor\": 0\n+ \"nbformat_minor\": 1\n}\n"
},
{
"change_type": "RENAME",
"old_path": "scripts/staging/SystemML-NN/examples/README.md",
"new_path": "scripts/staging/SystemML-NN/nn/examples/README.md",
"diff": "@@ -47,13 +47,12 @@ limitations under the License.\n* To run the examples, please first download and unzip the project via GitHub using the \"Clone or download\" button on the [homepage of the project](https://github.com/dusenberrymw/systemml-nn), *or* via the following commands:\n```\n- curl -LO https://github.com/dusenberrymw/systemml-nn/archive/master.zip\n- unzip master.zip\n+ git clone https://github.com/dusenberrymw/systemml-nn.git\n```\n-* Then, move into the `examples` folder via:\n+* Then, move into the `systemml-nn` folder via:\n```\n- cd systemml-nn-master/examples/\n+ cd systemml-nn\n```\n## Data\n"
},
{
"change_type": "RENAME",
"old_path": "scripts/staging/SystemML-NN/examples/get_mnist_data.sh",
"new_path": "scripts/staging/SystemML-NN/nn/examples/get_mnist_data.sh",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "scripts/staging/SystemML-NN/examples/mnist_lenet-predict.dml",
"new_path": "scripts/staging/SystemML-NN/nn/examples/mnist_lenet-predict.dml",
"diff": "# where each example is a 28x28 pixel image of grayscale values in\n# the range [0,255] stretched out as 784 pixels.\n#\n-# Sample Invocation:\n-# Execute using Spark\n+# Sample Invocation (running from outside the `nn` folder):\n+# 1. Download images.\n+#\n+# For example, save images to `nn/examples/data/mnist/images.csv`.\n+#\n+# 2. Execute using Spark\n# ```\n# spark-submit --master local[*] --driver-memory 5G\n-# --conf spark.driver.maxResultSize=0 --conf spark.akka.frameSize=128\n-# $SYSTEMML_HOME/target/SystemML.jar -f mnist_lenet-predict.dml\n-# -nvargs X=data/mnist/images.csv C=1 Hin=28 Win=28\n-# model_dir=model/mnist_lenet out_dir=data/mnist\n+# --conf spark.driver.maxResultSize=0 --conf spark.rpc.message.maxSize=128\n+# $SYSTEMML_HOME/target/SystemML.jar -f nn/examples/mnist_lenet-predict.dml\n+# -nvargs X=nn/examples/data/mnist/images.csv C=1 Hin=28 Win=28\n+# model_dir=nn/examples/model/mnist_lenet out_dir=nn/examples/data/mnist\n# ```\n#\n-source(\"mnist_lenet.dml\") as mnist_lenet\n+source(\"nn/examples/mnist_lenet.dml\") as mnist_lenet\n# Read training data\nfmt = ifdef($fmt, \"csv\")\n"
},
{
"change_type": "RENAME",
"old_path": "scripts/staging/SystemML-NN/examples/mnist_lenet-train.dml",
"new_path": "scripts/staging/SystemML-NN/nn/examples/mnist_lenet-train.dml",
"diff": "# the range [0,255] stretched out as 784 pixels, and each label is\n# one of 10 possible digits in [0,9].\n#\n-# Sample Invocation (running from wihtin the `examples` folder):\n+# Sample Invocation (running from outside the `nn` folder):\n# 1. Download data (60,000 training examples, and 10,000 test examples)\n# ```\n-# get_mnist_data.sh\n+# nn/examples/get_mnist_data.sh\n# ```\n#\n# 2. Execute using Spark\n# ```\n# spark-submit --master local[*] --driver-memory 10G\n-# --conf spark.driver.maxResultSize=0 --conf spark.akka.frameSize=128\n-# $SYSTEMML_HOME/target/SystemML.jar -f mnist_lenet-train.dml\n-# -nvargs train=data/mnist/mnist_train.csv test=data/mnist/mnist_test.csv\n-# C=1 Hin=28 Win=28 epochs=10 out_dir=model/mnist_lenet\n+# --conf spark.driver.maxResultSize=0 --conf spark.rpc.message.maxSize=128\n+# $SYSTEMML_HOME/target/SystemML.jar -f nn/examples/mnist_lenet-train.dml\n+# -nvargs train=nn/examples/data/mnist/mnist_train.csv test=nn/examples/data/mnist/mnist_test.csv\n+# C=1 Hin=28 Win=28 epochs=10 out_dir=nn/examples/model/mnist_lenet\n# ```\n#\n-source(\"mnist_lenet.dml\") as mnist_lenet\n+source(\"nn/examples/mnist_lenet.dml\") as mnist_lenet\n# Read training data & settings\nfmt = ifdef($fmt, \"csv\")\n"
},
{
"change_type": "RENAME",
"old_path": "scripts/staging/SystemML-NN/examples/mnist_lenet.dml",
"new_path": "scripts/staging/SystemML-NN/nn/examples/mnist_lenet.dml",
"diff": ""
},
{
"change_type": "RENAME",
"old_path": "scripts/staging/SystemML-NN/examples/mnist_softmax-predict.dml",
"new_path": "scripts/staging/SystemML-NN/nn/examples/mnist_softmax-predict.dml",
"diff": "# the range [0,255] stretched out as 784 pixels.\n#\n# Sample Invocation:\n-# Execute using Spark\n+# 1. Download images.\n+#\n+# For example, save images to `nn/examples/data/mnist/images.csv`.\n+#\n+# 2. Execute using Spark\n# ```\n# spark-submit --master local[*] --driver-memory 5G\n-# --conf spark.driver.maxResultSize=0 --conf spark.akka.frameSize=128\n-# $SYSTEMML_HOME/target/SystemML.jar -f mnist_softmax-predict.dml\n-# -nvargs X=data/mnist/images.csv model_dir=model/mnist_softmax\n-# out_dir=data/mnist\n-# ```\n+# --conf spark.driver.maxResultSize=0 --conf spark.rpc.message.maxSize=128\n+# $SYSTEMML_HOME/target/SystemML.jar -f nn/examples/mnist_softmax-predict.dml\n+# -nvargs X=nn/examples/data/mnist/images.csv\n+# model_dir=nn/examples/model/mnist_softmax out_dir=nn/examples/data/mnist\n#\n-source(\"mnist_softmax.dml\") as mnist_softmax\n+source(\"nn/examples/mnist_softmax.dml\") as mnist_softmax\n# Read training data\nfmt = ifdef($fmt, \"csv\")\n"
},
{
"change_type": "RENAME",
"old_path": "scripts/staging/SystemML-NN/examples/mnist_softmax-train.dml",
"new_path": "scripts/staging/SystemML-NN/nn/examples/mnist_softmax-train.dml",
"diff": "# Sample Invocation (running from wihtin the `examples` folder):\n# 1. Download data (60,000 training examples, and 10,000 test examples)\n# ```\n-# get_mnist_data.sh\n+# nn/examples/get_mnist_data.sh\n# ```\n#\n# 2. Execute using Spark\n# ```\n-# spark-submit --master local[*] --driver-memory 5G\n-# --conf spark.driver.maxResultSize=0 --conf spark.akka.frameSize=128\n-# $SYSTEMML_HOME/target/SystemML.jar -f mnist_softmax-train.dml\n-# -nvargs train=data/mnist/mnist_train.csv test=data/mnist/mnist_test.csv\n-# out_dir=model/mnist_softmax\n+# spark-submit --master local[*] --driver-memory 10G\n+# --conf spark.driver.maxResultSize=0 --conf spark.rpc.message.maxSize=128\n+# $SYSTEMML_HOME/target/SystemML.jar -f nn/examples/mnist_softmax-train.dml\n+# -nvargs train=nn/examples/data/mnist/mnist_train.csv test=nn/examples/data/mnist/mnist_test.csv\n+# epochs=1 out_dir=nn/examples/model/mnist_softmax\n# ```\n#\n-source(\"mnist_softmax.dml\") as mnist_softmax\n+source(\"nn/examples/mnist_softmax.dml\") as mnist_softmax\n# Read training data\nfmt = ifdef($fmt, \"csv\")\ntrain = read($train, format=fmt)\ntest = read($test, format=fmt)\n+epochs = ifdef($epochs, 1)\n+out_dir = ifdef($out_dir, \".\")\n# Extract images and labels\nimages = train[,2:ncol(train)]\n@@ -89,11 +91,11 @@ y = labels[5001:nrow(images),]\ny_val = labels[1:5000,]\n# Train\n-[W, b] = mnist_softmax::train(X, y, X_val, y_val)\n+[W, b] = mnist_softmax::train(X, y, X_val, y_val, epochs)\n# Write model out\n-write(W, $out_dir+\"/W\")\n-write(b, $out_dir+\"/b\")\n+write(W, out_dir+\"/W\")\n+write(b, out_dir+\"/b\")\n# Eval on test set\nprobs = mnist_softmax::predict(X_test, W, b)\n@@ -101,7 +103,7 @@ probs = mnist_softmax::predict(X_test, W, b)\n# Output results\nprint(\"Test Accuracy: \" + accuracy)\n-write(accuracy, $out_dir+\"/accuracy\")\n+write(accuracy, out_dir+\"/accuracy\")\nprint(\"\")\nprint(\"\")\n"
},
{
"change_type": "RENAME",
"old_path": "scripts/staging/SystemML-NN/examples/mnist_softmax.dml",
"new_path": "scripts/staging/SystemML-NN/nn/examples/mnist_softmax.dml",
"diff": "@@ -29,7 +29,8 @@ source(\"nn/layers/softmax.dml\") as softmax\nsource(\"nn/optim/sgd_nesterov.dml\") as sgd_nesterov\ntrain = function(matrix[double] X, matrix[double] y,\n- matrix[double] X_val, matrix[double] y_val)\n+ matrix[double] X_val, matrix[double] y_val,\n+ int epochs)\nreturn (matrix[double] W, matrix[double] b) {\n/*\n* Trains a softmax classifier.\n@@ -42,6 +43,7 @@ train = function(matrix[double] X, matrix[double] y,\n* - y: Target matrix, of shape (N, K).\n* - X_val: Input validation data matrix, of shape (N, C*Hin*Win).\n* - y_val: Target validation matrix, of shape (N, K).\n+ * - epochs: Total number of full training loops over the full data set.\n*\n* Outputs:\n* - W: Weights (parameters) matrix, of shape (D, M).\n@@ -66,7 +68,6 @@ train = function(matrix[double] X, matrix[double] y,\n# Optimize\nprint(\"Starting optimization\")\nbatch_size = 50\n- epochs = 1\niters = 1000 #ceil(N / batch_size)\nfor (e in 1:epochs) {\nfor(i in 1:iters) {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1524] Move `examples` into `nn` |
49,772 | 26.04.2017 15:32:25 | 25,200 | 2a4e4f8b30fce0ea8a4a6cfec9c0cced377534ad | [MINOR] Naming updates in the MNIST LeNet & softmax examples, and README fix. | [
{
"change_type": "MODIFY",
"old_path": "scripts/nn/README.md",
"new_path": "scripts/nn/README.md",
"diff": "@@ -22,7 +22,7 @@ limitations under the License.\n### A deep learning library for [Apache SystemML](https://github.com/apache/incubator-systemml).\n## Examples:\n-#### Please see the [`examples`](nn/examples) folder for more detailed examples, or view the following two quick examples.\n+#### Please see the [`examples`](examples) folder for more detailed examples, or view the following two quick examples.\n### Neural net for regression with vanilla SGD:\n```python\n# Imports\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/examples/mnist_lenet-train.dml",
"new_path": "scripts/nn/examples/mnist_lenet-train.dml",
"diff": "@@ -81,7 +81,7 @@ out_dir = ifdef($out_dir, \".\")\nimages = train[,2:ncol(train)]\nlabels = train[,1]\nX_test = test[,2:ncol(test)]\n-y_test = test[,1]\n+Y_test = test[,1]\n# Scale images to [-1,1], and one-hot encode the labels\nn = nrow(train)\n@@ -89,16 +89,16 @@ n_test = nrow(test)\nimages = (images / 255.0) * 2 - 1\nlabels = table(seq(1, n), labels+1, n, 10)\nX_test = (X_test / 255.0) * 2 - 1\n-y_test = table(seq(1, n_test), y_test+1, n_test, 10)\n+Y_test = table(seq(1, n_test), Y_test+1, n_test, 10)\n# Split into training (55,000 examples) and validation (5,000 examples)\nX = images[5001:nrow(images),]\nX_val = images[1:5000,]\n-y = labels[5001:nrow(images),]\n-y_val = labels[1:5000,]\n+Y = labels[5001:nrow(images),]\n+Y_val = labels[1:5000,]\n# Train\n-[W1, b1, W2, b2, W3, b3, W4, b4] = mnist_lenet::train(X, y, X_val, y_val, C, Hin, Win, epochs)\n+[W1, b1, W2, b2, W3, b3, W4, b4] = mnist_lenet::train(X, Y, X_val, Y_val, C, Hin, Win, epochs)\n# Write model out\nwrite(W1, out_dir+\"/W1\")\n@@ -112,7 +112,7 @@ write(b4, out_dir+\"/b4\")\n# Eval on test set\nprobs = mnist_lenet::predict(X_test, C, Hin, Win, W1, b1, W2, b2, W3, b3, W4, b4)\n-[loss, accuracy] = mnist_lenet::eval(probs, y_test)\n+[loss, accuracy] = mnist_lenet::eval(probs, Y_test)\n# Output results\nprint(\"Test Accuracy: \" + accuracy)\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/examples/mnist_lenet.dml",
"new_path": "scripts/nn/examples/mnist_lenet.dml",
"diff": "@@ -33,8 +33,8 @@ source(\"nn/layers/relu.dml\") as relu\nsource(\"nn/layers/softmax.dml\") as softmax\nsource(\"nn/optim/sgd_nesterov.dml\") as sgd_nesterov\n-train = function(matrix[double] X, matrix[double] y,\n- matrix[double] X_val, matrix[double] y_val,\n+train = function(matrix[double] X, matrix[double] Y,\n+ matrix[double] X_val, matrix[double] Y_val,\nint C, int Hin, int Win, int epochs)\nreturn (matrix[double] W1, matrix[double] b1,\nmatrix[double] W2, matrix[double] b2,\n@@ -44,14 +44,14 @@ train = function(matrix[double] X, matrix[double] y,\n* Trains a convolutional net using the \"LeNet\" architecture.\n*\n* The input matrix, X, has N examples, each represented as a 3D\n- * volume unrolled into a single vector. The targets, y, have K\n+ * volume unrolled into a single vector. The targets, Y, have K\n* classes, and are one-hot encoded.\n*\n* Inputs:\n* - X: Input data matrix, of shape (N, C*Hin*Win).\n- * - y: Target matrix, of shape (N, K).\n+ * - Y: Target matrix, of shape (N, K).\n* - X_val: Input validation data matrix, of shape (N, C*Hin*Win).\n- * - y_val: Target validation matrix, of shape (N, K).\n+ * - Y_val: Target validation matrix, of shape (N, K).\n* - C: Number of input channels (dimensionality of input depth).\n* - Hin: Input height.\n* - Win: Input width.\n@@ -68,7 +68,7 @@ train = function(matrix[double] X, matrix[double] y,\n* - b4: 4th layer biases vector, of shape (1, K).\n*/\nN = nrow(X)\n- K = ncol(y)\n+ K = ncol(Y)\n# Create network:\n# conv1 -> relu1 -> pool1 -> conv2 -> relu2 -> pool2 -> affine3 -> relu3 -> affine4 -> softmax\n@@ -110,7 +110,7 @@ train = function(matrix[double] X, matrix[double] y,\nbeg = ((i-1) * batch_size) %% N + 1\nend = min(N, beg + batch_size - 1)\nX_batch = X[beg:end,]\n- y_batch = y[beg:end,]\n+ y_batch = Y[beg:end,]\n# Compute forward pass\n## layer 1: conv1 -> relu1 -> pool1\n@@ -146,8 +146,8 @@ train = function(matrix[double] X, matrix[double] y,\n# Compute validation loss & accuracy\nprobs_val = predict(X_val, C, Hin, Win, W1, b1, W2, b2, W3, b3, W4, b4)\n- loss_val = cross_entropy_loss::forward(probs_val, y_val)\n- accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(y_val))\n+ loss_val = cross_entropy_loss::forward(probs_val, Y_val)\n+ accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))\n# Output results\nprint(\"Epoch: \" + e + \", Iter: \" + i + \", Train Loss: \" + loss + \", Train Accuracy: \"\n@@ -283,37 +283,37 @@ predict = function(matrix[double] X, int C, int Hin, int Win,\n}\n}\n-eval = function(matrix[double] probs, matrix[double] y)\n+eval = function(matrix[double] probs, matrix[double] Y)\nreturn (double loss, double accuracy) {\n/*\n* Evaluates a convolutional net using the \"LeNet\" architecture.\n*\n* The probs matrix contains the class probability predictions\n- * of K classes over N examples. The targets, y, have K classes,\n+ * of K classes over N examples. 
The targets, Y, have K classes,\n* and are one-hot encoded.\n*\n* Inputs:\n* - probs: Class probabilities, of shape (N, K).\n- * - y: Target matrix, of shape (N, K).\n+ * - Y: Target matrix, of shape (N, K).\n*\n* Outputs:\n* - loss: Scalar loss, of shape (1).\n* - accuracy: Scalar accuracy, of shape (1).\n*/\n# Compute loss & accuracy\n- loss = cross_entropy_loss::forward(probs, y)\n- correct_pred = rowIndexMax(probs) == rowIndexMax(y)\n+ loss = cross_entropy_loss::forward(probs, Y)\n+ correct_pred = rowIndexMax(probs) == rowIndexMax(Y)\naccuracy = mean(correct_pred)\n}\ngenerate_dummy_data = function()\n- return (matrix[double] X, matrix[double] y, int C, int Hin, int Win) {\n+ return (matrix[double] X, matrix[double] Y, int C, int Hin, int Win) {\n/*\n* Generate a dummy dataset similar to the MNIST dataset.\n*\n* Outputs:\n* - X: Input data matrix, of shape (N, D).\n- * - y: Target matrix, of shape (N, K).\n+ * - Y: Target matrix, of shape (N, K).\n* - C: Number of input channels (dimensionality of input depth).\n* - Hin: Input height.\n* - Win: Input width.\n@@ -326,6 +326,6 @@ generate_dummy_data = function()\nK = 10 # num target classes\nX = rand(rows=N, cols=C*Hin*Win, pdf=\"normal\")\nclasses = round(rand(rows=N, cols=1, min=1, max=K, pdf=\"uniform\"))\n- y = table(seq(1, N), classes) # one-hot encoding\n+ Y = table(seq(1, N), classes) # one-hot encoding\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/examples/mnist_softmax-train.dml",
"new_path": "scripts/nn/examples/mnist_softmax-train.dml",
"diff": "@@ -73,7 +73,7 @@ out_dir = ifdef($out_dir, \".\")\nimages = train[,2:ncol(train)]\nlabels = train[,1]\nX_test = test[,2:ncol(test)]\n-y_test = test[,1]\n+Y_test = test[,1]\n# Scale images to [0,1], and one-hot encode the labels\nn = nrow(train)\n@@ -82,16 +82,16 @@ classes = 10\nimages = images / 255.0\nlabels = table(seq(1, n), labels+1, n, classes)\nX_test = X_test / 255.0\n-y_test = table(seq(1, n_test), y_test+1, n_test, classes)\n+Y_test = table(seq(1, n_test), Y_test+1, n_test, classes)\n# Split into training (55,000 examples) and validation (5,000 examples)\nX = images[5001:nrow(images),]\nX_val = images[1:5000,]\n-y = labels[5001:nrow(images),]\n-y_val = labels[1:5000,]\n+Y = labels[5001:nrow(images),]\n+Y_val = labels[1:5000,]\n# Train\n-[W, b] = mnist_softmax::train(X, y, X_val, y_val, epochs)\n+[W, b] = mnist_softmax::train(X, Y, X_val, Y_val, epochs)\n# Write model out\nwrite(W, out_dir+\"/W\")\n@@ -99,7 +99,7 @@ write(b, out_dir+\"/b\")\n# Eval on test set\nprobs = mnist_softmax::predict(X_test, W, b)\n-[loss, accuracy] = mnist_softmax::eval(probs, y_test)\n+[loss, accuracy] = mnist_softmax::eval(probs, Y_test)\n# Output results\nprint(\"Test Accuracy: \" + accuracy)\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/examples/mnist_softmax.dml",
"new_path": "scripts/nn/examples/mnist_softmax.dml",
"diff": "@@ -28,21 +28,21 @@ source(\"nn/layers/cross_entropy_loss.dml\") as cross_entropy_loss\nsource(\"nn/layers/softmax.dml\") as softmax\nsource(\"nn/optim/sgd_nesterov.dml\") as sgd_nesterov\n-train = function(matrix[double] X, matrix[double] y,\n- matrix[double] X_val, matrix[double] y_val,\n+train = function(matrix[double] X, matrix[double] Y,\n+ matrix[double] X_val, matrix[double] Y_val,\nint epochs)\nreturn (matrix[double] W, matrix[double] b) {\n/*\n* Trains a softmax classifier.\n*\n* The input matrix, X, has N examples, each with D features.\n- * The targets, y, have K classes, and are one-hot encoded.\n+ * The targets, Y, have K classes, and are one-hot encoded.\n*\n* Inputs:\n* - X: Input data matrix, of shape (N, D).\n- * - y: Target matrix, of shape (N, K).\n+ * - Y: Target matrix, of shape (N, K).\n* - X_val: Input validation data matrix, of shape (N, C*Hin*Win).\n- * - y_val: Target validation matrix, of shape (N, K).\n+ * - Y_val: Target validation matrix, of shape (N, K).\n* - epochs: Total number of full training loops over the full data set.\n*\n* Outputs:\n@@ -51,7 +51,7 @@ train = function(matrix[double] X, matrix[double] y,\n*/\nN = nrow(X) # num examples\nD = ncol(X) # num features\n- K = ncol(y) # num classes\n+ K = ncol(Y) # num classes\n# Create softmax classifier:\n# affine -> softmax\n@@ -75,7 +75,7 @@ train = function(matrix[double] X, matrix[double] y,\nbeg = ((i-1) * batch_size) %% N + 1\nend = min(N, beg + batch_size - 1)\nX_batch = X[beg:end,]\n- y_batch = y[beg:end,]\n+ y_batch = Y[beg:end,]\n# Compute forward pass\n## affine & softmax:\n@@ -86,8 +86,8 @@ train = function(matrix[double] X, matrix[double] y,\nloss = cross_entropy_loss::forward(probs, y_batch)\naccuracy = mean(rowIndexMax(probs) == rowIndexMax(y_batch))\nprobs_val = predict(X_val, W, b)\n- loss_val = cross_entropy_loss::forward(probs_val, y_val)\n- accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(y_val))\n+ loss_val = cross_entropy_loss::forward(probs_val, Y_val)\n+ accuracy_val = mean(rowIndexMax(probs_val) == rowIndexMax(Y_val))\nprint(\"Epoch: \" + e + \", Iter: \" + i + \", Train Loss: \" + loss + \", Train Accuracy: \" +\naccuracy + \", Val Loss: \" + loss_val + \", Val Accuracy: \" + accuracy_val)\n@@ -130,37 +130,37 @@ predict = function(matrix[double] X, matrix[double] W, matrix[double] b)\nprobs = softmax::forward(out)\n}\n-eval = function(matrix[double] probs, matrix[double] y)\n+eval = function(matrix[double] probs, matrix[double] Y)\nreturn (double loss, double accuracy) {\n/*\n* Evaluates a softmax classifier.\n*\n* The probs matrix contains the class probability predictions\n- * of K classes over N examples. The targets, y, have K classes,\n+ * of K classes over N examples. 
The targets, Y, have K classes,\n* and are one-hot encoded.\n*\n* Inputs:\n* - probs: Class probabilities, of shape (N, K).\n- * - y: Target matrix, of shape (N, K).\n+ * - Y: Target matrix, of shape (N, K).\n*\n* Outputs:\n* - loss: Scalar loss, of shape (1).\n* - accuracy: Scalar accuracy, of shape (1).\n*/\n# Compute loss & accuracy\n- loss = cross_entropy_loss::forward(probs, y)\n- correct_pred = rowIndexMax(probs) == rowIndexMax(y)\n+ loss = cross_entropy_loss::forward(probs, Y)\n+ correct_pred = rowIndexMax(probs) == rowIndexMax(Y)\naccuracy = mean(correct_pred)\n}\ngenerate_dummy_data = function()\n- return (matrix[double] X, matrix[double] y, int C, int Hin, int Win) {\n+ return (matrix[double] X, matrix[double] Y, int C, int Hin, int Win) {\n/*\n* Generate a dummy dataset similar to the MNIST dataset.\n*\n* Outputs:\n* - X: Input data matrix, of shape (N, D).\n- * - y: Target matrix, of shape (N, K).\n+ * - Y: Target matrix, of shape (N, K).\n* - C: Number of input channels (dimensionality of input depth).\n* - Hin: Input height.\n* - Win: Input width.\n@@ -173,6 +173,6 @@ generate_dummy_data = function()\nT = 10 # num targets\nX = rand(rows=N, cols=C*Hin*Win, pdf=\"normal\")\nclasses = round(rand(rows=N, cols=1, min=1, max=T, pdf=\"uniform\"))\n- y = table(seq(1, N), classes) # one-hot encoding\n+ Y = table(seq(1, N), classes) # one-hot encoding\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Naming updates in the MNIST LeNet & softmax examples, and README fix. |
49,736 | 26.04.2017 19:24:09 | 28,800 | 39351749f76e2703b9be43d3b08b348a7e115384 | [HOTFIX] Bugfix for bias_add and bias_multiply operations | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java",
"diff": "@@ -709,26 +709,33 @@ public class LibMatrixDNN {\nint K = bias.getNumRows();\nint PQ = input.getNumColumns() / K;\n- ConvolutionParameters params = new ConvolutionParameters(N, PQ, -1, -1, K, -1, -1, -1, -1, -1, -1, numThreads);\n- params.input1 = input;\n- params.input2 = bias;\n- params.output = outputBlock;\n-\nif(bias.getNumColumns() != 1 || input.getNumColumns() % K != 0) {\nthrow new DMLRuntimeException(\"Incorrect inputs for bias_add: input[\" + N + \" X \" + input.getNumColumns() + \"] and bias[\" + K + \" X \" + bias.getNumColumns() + \"]\");\n}\n- if(input.isEmptyBlock()) {\ndouble [] outputArray = outputBlock.getDenseBlock();\n+ if(input.isEmptyBlock()) {\nfor(int n = 0; n < N; n++)\nConvolutionUtils.fillBias(bias, outputArray, n, n+1, N, K, PQ);\n}\nelse {\n- runConvTask(TaskType.BiasAdd, params);\n+ // Handles both dense and sparse inputs and copies it to dense output\n+ outputBlock.copy(input);\n+ int index = 0;\n+ if(bias.isInSparseFormat())\n+ bias.sparseToDense(); // Since bias is extremely small array\n+ double [] biasArr = bias.getDenseBlock();\n+ for(int n = 0; n < N; n++) {\n+ for(int k = 0; k < K; k++) {\n+ for(int pq = 0; pq < PQ; pq++, index++) {\n+ outputArray[index] += biasArr[k];\n+ }\n+ }\n+ }\n}\n//post-processing: maintain nnz\n- params.output.recomputeNonZeros();\n+ outputBlock.recomputeNonZeros();\n}\n@@ -759,7 +766,21 @@ public class LibMatrixDNN {\n}\nif(!input.isEmptyBlock() && !bias.isEmptyBlock()) {\n- runConvTask(TaskType.BiasMultiply, params);\n+ // Handles both dense and sparse inputs and copies it to dense output\n+ outputBlock.copy(input);\n+ double [] outputArray = outputBlock.getDenseBlock();\n+ int index = 0;\n+ if(bias.isInSparseFormat())\n+ bias.sparseToDense(); // Since bias is extremely small array\n+ double [] biasArr = bias.getDenseBlock();\n+ for(int n = 0; n < N; n++) {\n+ for(int k = 0; k < K; k++) {\n+ for(int pq = 0; pq < PQ; pq++, index++) {\n+ outputArray[index] *= biasArr[k];\n+ }\n+ }\n+ }\n+\n//post-processing: maintain nnz\nparams.output.recomputeNonZeros();\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX] Bugfix for bias_add and bias_multiply operations |
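The fix above replaces the task-based bias paths with a direct loop over the dense output. Because images are laid out row-major as N x (K*PQ), channel k of every example occupies a contiguous run of PQ cells, so a single running index suffices. A self-contained sketch of that loop — simplified to dense doubles with illustrative names, omitting the sparse handling and nnz maintenance of the real code:

```java
// Dense bias_add over NCHW-flattened input: N examples, K channels, PQ cells each.
public class BiasAddSketch {
    static double[] biasAdd(double[] input, double[] bias, int N, int K, int PQ) {
        double[] out = input.clone();            // copy input, as the fix above does
        int index = 0;
        for (int n = 0; n < N; n++)
            for (int k = 0; k < K; k++)
                for (int pq = 0; pq < PQ; pq++, index++)
                    out[index] += bias[k];       // same bias for all PQ cells of channel k
        return out;
    }

    public static void main(String[] args) {
        double[] in = {1, 1, 2, 2};              // N=1, K=2, PQ=2
        double[] b  = {10, 20};
        // prints [11.0, 11.0, 22.0, 22.0]
        System.out.println(java.util.Arrays.toString(biasAdd(in, b, 1, 2, 2)));
    }
}
```

The bias_multiply case differs only in using `*=` instead of `+=` in the inner loop.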
49,736 | 30.04.2017 12:39:12 | 25,200 | b50fe1078a3ad2e4f3ae50560ba5dfd269059b5f | Added the documentation for convolution
Closes | [
{
"change_type": "MODIFY",
"old_path": "docs/dml-language-reference.md",
"new_path": "docs/dml-language-reference.md",
"diff": "@@ -1494,6 +1494,38 @@ The following code snippet shows an example scenario of transforming a training\nNote that the metadata generated during the training phase (located at `/user/ml/train_tf_metadata`) is used to apply the list of transformations (that were carried out on training data set) on the test data set. Since the second invocation of `transform()` does not really generate any new metadata data, the given metadata (`/user/ml/train_tf_metdata`) is copied to the target location (`/user/ml/test_tf_metdata`). Even though such a behavior creates redundant copies of transformation metadata, it is preferred as it allows the association of every data set with the corresponding transformation metadata.\n+### Deep Learning Built-In Functions\n+\n+SystemML represent a tensor as a matrix stored in a row-major format,\n+where first dimension of tensor and matrix are exactly the same. For example, a tensor (with all zeros)\n+of shape [3, 2, 4, 5] can be instantiated by following DML statement:\n+```sh\n+A = matrix(0, rows=3, cols=2*4*5)\n+```\n+\n+The images are assumed to be stored NCHW format, where N = batch size, C = #channels, H = height of image and W = width of image.\n+Hence, the images are internally represented as a matrix with dimension (N, C * H * W).\n+\n+\n+| Function name | Input matrices | Input Parameters | Notes |\n+|------------------------|----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------|\n+| conv2d | input, filter | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], filter_shape=[numFilters, numChannels, height_filter, width_filter] | Performs 2D convolution operation |\n+| conv2d_backward_filter | input, dout | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], filter_shape=[numFilters, numChannels, height_filter, width_filter] | Computes the gradients wrt filter of 2D convolution |\n+| conv2d_backward_data | filter, dout | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], filter_shape=[numFilters, numChannels, height_filter, width_filter] | Computes the gradients wrt input of 2D convolution |\n+| max_pool | input | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], pool_size=[height_pool, width_pool] | Performs max pooling operation |\n+| max_pool_backward | input, dout | stride=[stride_h, stride_w], padding=[pad_h, pad_w], input_shape=[batch_size, num_channels, height_image, width_image], pool_size=[height_pool, width_pool] | Computes the gradients wrt input of 2D maxpooling |\n+\n+\n+Examples:\n+\n+| Function | Parameters | Visualization |\n+|----------------------|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------|\n+| conv2d | stride=[1,1] |  |\n+| conv2d | stride=[2,2] |  |\n+| conv2d_backward_data | stride=[1,1] |  |\n+| conv2d_backward_data | stride=[2,2] |  |\n+| conv2d_backward_data | stride=[2,2] and 2x2 filter |  |\n+\n### Other Built-In Functions\n"
},
{
"change_type": "ADD",
"old_path": "docs/img/dml-language-reference/Conv2d.gif",
"new_path": "docs/img/dml-language-reference/Conv2d.gif",
"diff": "Binary files /dev/null and b/docs/img/dml-language-reference/Conv2d.gif differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/img/dml-language-reference/Conv2d1.gif",
"new_path": "docs/img/dml-language-reference/Conv2d1.gif",
"diff": "Binary files /dev/null and b/docs/img/dml-language-reference/Conv2d1.gif differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/img/dml-language-reference/Conv2d_backward_data.gif",
"new_path": "docs/img/dml-language-reference/Conv2d_backward_data.gif",
"diff": "Binary files /dev/null and b/docs/img/dml-language-reference/Conv2d_backward_data.gif differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/img/dml-language-reference/Conv2d_backward_data1.gif",
"new_path": "docs/img/dml-language-reference/Conv2d_backward_data1.gif",
"diff": "Binary files /dev/null and b/docs/img/dml-language-reference/Conv2d_backward_data1.gif differ\n"
},
{
"change_type": "ADD",
"old_path": "docs/img/dml-language-reference/Conv2d_backward_data2.gif",
"new_path": "docs/img/dml-language-reference/Conv2d_backward_data2.gif",
"diff": "Binary files /dev/null and b/docs/img/dml-language-reference/Conv2d_backward_data2.gif differ\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-540] Added the documentation for convolution
Closes #464. |
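The documentation added above pins down the row-major NCHW layout: example n is row n of the backing matrix, and element (c, h, w) of a [N, C, H, W] tensor lands in column c*H*W + h*W + w. A small sketch of that index arithmetic — the helper name is illustrative, not part of SystemML:

```java
// Row-major NCHW flattening: maps a tensor coordinate to a matrix column.
public class NCHWIndexSketch {
    static int column(int c, int h, int w, int H, int W) {
        return c * H * W + h * W + w;
    }

    public static void main(String[] args) {
        int H = 4, W = 5;                          // the [3, 2, 4, 5] example above
        System.out.println(column(1, 2, 3, H, W)); // 1*20 + 2*5 + 3 = 33
    }
}
```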
49,736 | 30.04.2017 12:15:48 | 28,800 | 7a3e95716e81c1fc3415c36c357c9d24b710e739 | Initial attempt to consolidate replicated compilation chain
This only consolidates MLContext and DMLScript, but not JMLC and old
MLContext (as it is going to be removed soon).
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/DMLScript.java",
"new_path": "src/main/java/org/apache/sysml/api/DMLScript.java",
"diff": "@@ -88,11 +88,8 @@ import org.apache.sysml.runtime.controlprogram.context.SparkExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.parfor.ProgramConverter;\nimport org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer;\nimport org.apache.sysml.runtime.controlprogram.parfor.util.IDHandler;\n-import org.apache.sysml.runtime.instructions.gpu.context.GPUContext;\n-import org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\nimport org.apache.sysml.runtime.io.IOUtilFunctions;\nimport org.apache.sysml.runtime.matrix.CleanupMR;\n-import org.apache.sysml.runtime.matrix.data.LibMatrixDNN;\nimport org.apache.sysml.runtime.matrix.mapred.MRConfigurationNames;\nimport org.apache.sysml.runtime.matrix.mapred.MRJobConfiguration;\nimport org.apache.sysml.runtime.util.LocalFileUtils;\n@@ -100,7 +97,6 @@ import org.apache.sysml.runtime.util.MapReduceTool;\nimport org.apache.sysml.utils.Explain;\nimport org.apache.sysml.utils.Explain.ExplainCounts;\nimport org.apache.sysml.utils.Explain.ExplainType;\n-import org.apache.sysml.utils.GPUStatistics;\nimport org.apache.sysml.utils.Statistics;\nimport org.apache.sysml.yarn.DMLAppMasterUtils;\nimport org.apache.sysml.yarn.DMLYarnClientProxy;\n@@ -826,49 +822,16 @@ public class DMLScript\n//double costs = CostEstimationWrapper.getTimeEstimate(rtprog, ExecutionContextFactory.createContext());\n//System.out.println(\"Estimated costs: \"+costs);\n- // Whether extra statistics useful for developers and others interested in digging\n- // into performance problems are recorded and displayed\n- GPUStatistics.DISPLAY_STATISTICS = dmlconf.getBooleanValue(DMLConfig.EXTRA_GPU_STATS);\n- LibMatrixDNN.DISPLAY_STATISTICS = dmlconf.getBooleanValue(DMLConfig.EXTRA_DNN_STATS);\n-\n- // Sets the maximum number of GPUs per process, -1 for all available GPUs\n- GPUContextPool.PER_PROCESS_MAX_GPUS = dmlconf.getIntValue(DMLConfig.MAX_GPUS_PER_PROCESS);\n-\n//Step 10: execute runtime program\n- Statistics.startRunTimer();\nExecutionContext ec = null;\n- GPUContext gCtx = null;\n- try\n- {\n- //run execute (w/ exception handling to ensure proper shutdown)\n+ try {\nec = ExecutionContextFactory.createContext(rtprog);\n- if (DMLScript.USE_ACCELERATOR && ec != null){\n- gCtx = GPUContextPool.getFromPool();\n- if (gCtx == null) {\n- throw new DMLRuntimeException(\"GPU : Could not create GPUContext, either no GPU or all GPUs currently in use\");\n- }\n- gCtx.initializeThread();\n- ec.setGPUContext(gCtx);\n- }\n- rtprog.execute( ec );\n-\n- }\n- finally //ensure cleanup/shutdown\n- {\n- if(DMLScript.USE_ACCELERATOR && ec.getGPUContext() != null) {\n- GPUContextPool.returnToPool(ec.getGPUContext());\n+ ScriptExecutorUtils.executeRuntimeProgram(rtprog, ec, dmlconf);\n}\n-\n- if( dmlconf.getBooleanValue(DMLConfig.CODEGEN) )\n- SpoofCompiler.cleanupCodeGenerator();\n+ finally {\nif(ec != null && ec instanceof SparkExecutionContext)\n((SparkExecutionContext) ec).close();\n-\n- //display statistics (incl caching stats if enabled)\n- Statistics.stopRunTimer();\n- LOG.info(Statistics.display());\nLOG.info(\"END DML run \" + getDateTime() );\n-\n//cleanup scratch_space and all working dirs\ncleanupHadoopExecution( dmlconf );\n}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+package org.apache.sysml.api;\n+\n+import org.apache.commons.logging.Log;\n+import org.apache.commons.logging.LogFactory;\n+import org.apache.sysml.conf.DMLConfig;\n+import org.apache.sysml.hops.codegen.SpoofCompiler;\n+import org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.Program;\n+import org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n+import org.apache.sysml.runtime.instructions.gpu.context.GPUContext;\n+import org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixDNN;\n+import org.apache.sysml.utils.GPUStatistics;\n+import org.apache.sysml.utils.Statistics;\n+\n+public class ScriptExecutorUtils {\n+ private static final Log LOG = LogFactory.getLog(ScriptExecutorUtils.class.getName());\n+\n+ /**\n+ * Execute the runtime program. This involves execution of the program\n+ * blocks that make up the runtime program and may involve dynamic\n+ * recompilation.\n+ *\n+ * @param rtprog runtime program\n+ * @param ec execution context\n+ * @param dmlconf dml configuration\n+ * @throws DMLRuntimeException if error occurs\n+ */\n+ public static void executeRuntimeProgram(Program rtprog, ExecutionContext ec, DMLConfig dmlconf) throws DMLRuntimeException {\n+ // Whether extra statistics useful for developers and others interested in digging\n+ // into performance problems are recorded and displayed\n+ GPUStatistics.DISPLAY_STATISTICS = dmlconf.getBooleanValue(DMLConfig.EXTRA_GPU_STATS);\n+ LibMatrixDNN.DISPLAY_STATISTICS = dmlconf.getBooleanValue(DMLConfig.EXTRA_DNN_STATS);\n+\n+ // Sets the maximum number of GPUs per process, -1 for all available GPUs\n+ GPUContextPool.PER_PROCESS_MAX_GPUS = dmlconf.getIntValue(DMLConfig.MAX_GPUS_PER_PROCESS);\n+ Statistics.startRunTimer();\n+ GPUContext gCtx = null;\n+ try {\n+ //run execute (w/ exception handling to ensure proper shutdown)\n+ if (DMLScript.USE_ACCELERATOR && ec != null){\n+ gCtx = GPUContextPool.getFromPool();\n+ if (gCtx == null) {\n+ throw new DMLRuntimeException(\"GPU : Could not create GPUContext, either no GPU or all GPUs currently in use\");\n+ }\n+ gCtx.initializeThread();\n+ ec.setGPUContext(gCtx);\n+ }\n+ rtprog.execute( ec );\n+ }\n+ finally //ensure cleanup/shutdown\n+ {\n+ if(DMLScript.USE_ACCELERATOR && ec.getGPUContext() != null) {\n+ GPUContextPool.returnToPool(ec.getGPUContext());\n+ }\n+ if( dmlconf.getBooleanValue(DMLConfig.CODEGEN) )\n+ SpoofCompiler.cleanupCodeGenerator();\n+\n+ //display statistics (incl caching stats if enabled)\n+ Statistics.stopRunTimer();\n+ LOG.info(Statistics.display());\n+ }\n+ }\n+\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"diff": "@@ -25,6 +25,7 @@ import java.util.Set;\nimport org.apache.commons.lang3.StringUtils;\nimport org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.ScriptExecutorUtils;\nimport org.apache.sysml.api.jmlc.JMLCUtils;\nimport org.apache.sysml.api.mlcontext.MLContext.ExplainLevel;\nimport org.apache.sysml.conf.ConfigurationManager;\n@@ -47,8 +48,6 @@ import org.apache.sysml.runtime.controlprogram.LocalVariableMap;\nimport org.apache.sysml.runtime.controlprogram.Program;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContextFactory;\n-import org.apache.sysml.runtime.instructions.gpu.context.GPUContext;\n-import org.apache.sysml.runtime.instructions.gpu.context.GPUContextPool;\nimport org.apache.sysml.utils.Explain;\nimport org.apache.sysml.utils.Explain.ExplainCounts;\nimport org.apache.sysml.utils.Explain.ExplainType;\n@@ -118,7 +117,9 @@ public class ScriptExecutor {\nprotected boolean init = false;\nprotected boolean explain = false;\nprotected boolean gpu = false;\n+ protected boolean oldGPU = false;\nprotected boolean statistics = false;\n+ protected boolean oldStatistics = false;\nprotected ExplainLevel explainLevel;\nprotected int statisticsMaxHeavyHitters = 10;\nprotected boolean maintainSymbolTable = false;\n@@ -245,18 +246,10 @@ public class ScriptExecutor {\nif (symbolTable != null) {\nexecutionContext.setVariables(symbolTable);\n}\n- try {\n- if (gpu) {\n- GPUContext gCtx = GPUContextPool.getFromPool();\n- if (gCtx == null)\n- throw new MLContextException(\"GPU : no GPUs or no more free GPUs available\");\n- executionContext.setGPUContext(gCtx);\n- gCtx.initializeThread();\n- }\n- } catch (DMLRuntimeException e) {\n- throw new MLContextException(\"GPU : Exception occurred during initialization\");\n- }\n-\n+ oldGPU = DMLScript.USE_ACCELERATOR;\n+ oldStatistics = DMLScript.STATISTICS;\n+ DMLScript.USE_ACCELERATOR = gpu;\n+ DMLScript.STATISTICS = statistics;\n}\n/**\n@@ -290,9 +283,6 @@ public class ScriptExecutor {\npublic MLResults execute(Script script) {\n// main steps in script execution\n- if(statistics) {\n- Statistics.startRunTimer();\n- }\nsetup(script);\nparseScript();\nliveVariableAnalysis();\n@@ -307,19 +297,19 @@ public class ScriptExecutor {\ncountCompiledMRJobsAndSparkInstructions();\ninitializeCachingAndScratchSpace();\ncleanupRuntimeProgram();\n+\n+ try {\ncreateAndInitializeExecutionContext();\nexecuteRuntimeProgram();\n+ }\n+ finally {\ncleanupAfterExecution();\n+ }\n// add symbol table to MLResults\nMLResults mlResults = new MLResults(script);\nscript.setResults(mlResults);\n- if (statistics) {\n- Statistics.stopRunTimer();\n- System.out.println(Statistics.display(statisticsMaxHeavyHitters));\n- }\n-\nreturn mlResults;\n}\n@@ -344,14 +334,8 @@ public class ScriptExecutor {\n*/\nprotected void cleanupAfterExecution() {\nrestoreInputsInSymbolTable();\n- try {\n- if (gpu) {\n- GPUContext gCtx = executionContext.getGPUContext();\n- GPUContextPool.returnToPool(gCtx);\n- }\n- } catch (DMLRuntimeException e) {\n- throw new MLContextException(\"Exception occurred during cleanup of GPU related resources\", e);\n- }\n+ DMLScript.USE_ACCELERATOR = oldGPU;\n+ DMLScript.STATISTICS = oldStatistics;\n}\n/**\n@@ -394,7 +378,7 @@ public class ScriptExecutor {\n*/\nprotected void executeRuntimeProgram() {\ntry {\n- runtimeProgram.execute(executionContext);\n+ ScriptExecutorUtils.executeRuntimeProgram(runtimeProgram, executionContext, config);\n} catch (DMLRuntimeException e) 
{\nthrow new MLContextException(\"Exception occurred while executing runtime program\", e);\n}\n@@ -667,7 +651,6 @@ public class ScriptExecutor {\n*/\npublic void setGPU(boolean enabled) {\nthis.gpu = enabled;\n- DMLScript.USE_ACCELERATOR = enabled;\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-591] Initial attempt to consolidate replicated compilation chain
This only consolidates MLContext and DMLScript, but not JMLC and old
MLContext (as it is going to be removed soon).
Closes #471. |
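The consolidation above centralizes the acquire/execute/release discipline that both entry points previously duplicated: take a GPU context if requested, run the program, and return the context and report statistics in a finally block so cleanup happens even when execution fails. A condensed, self-contained sketch of that shape — stand-in types only, not the SystemML classes:

```java
// Acquire/execute/release shape, with cleanup guaranteed by try/finally.
public class RunnerSketch {
    interface Context { void close(); }

    static void run(Runnable program, boolean useGpu) {
        Context gpu = null;
        try {
            if (useGpu) {
                gpu = acquireGpu();           // may fail if all devices are busy
            }
            program.run();
        } finally {
            if (gpu != null) gpu.close();     // always return the device to the pool
            // stop timers / print statistics here, even on failure
        }
    }

    static Context acquireGpu() {
        return () -> System.out.println("GPU returned to pool");
    }

    public static void main(String[] args) {
        run(() -> System.out.println("program executed"), true);
    }
}
```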
49,738 | 30.04.2017 16:47:15 | 25,200 | f2a927f87d8d9f82bcd64e42d2eb5abb838d6085 | [MINOR] Fix robustness codegen cost-based plan selection (cost model) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseCostBased.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/PlanSelectionFuseCostBased.java",
"diff": "@@ -725,7 +725,7 @@ public class PlanSelectionFuseCostBased extends PlanSelection\nrGetComputeCosts(c, partition, computeCosts);\n//get costs for given hop\n- double costs = 0;\n+ double costs = 1;\nif( current instanceof UnaryOp ) {\nswitch( ((UnaryOp)current).getOp() ) {\ncase ABS:\n@@ -759,7 +759,7 @@ public class PlanSelectionFuseCostBased extends PlanSelection\ncase CUMMAX:\ncase CUMPROD: costs = 1; break;\ndefault:\n- throw new RuntimeException(\"Cost model not \"\n+ LOG.warn(\"Cost model not \"\n+ \"implemented yet for: \"+((UnaryOp)current).getOp());\n}\n}\n@@ -803,7 +803,7 @@ public class PlanSelectionFuseCostBased extends PlanSelection\nbreak;\ncase COVARIANCE: costs = 23; break;\ndefault:\n- throw new RuntimeException(\"Cost model not \"\n+ LOG.warn(\"Cost model not \"\n+ \"implemented yet for: \"+((BinaryOp)current).getOp());\n}\n}\n@@ -826,7 +826,7 @@ public class PlanSelectionFuseCostBased extends PlanSelection\nbreak;\ncase COVARIANCE: costs = 23; break;\ndefault:\n- throw new RuntimeException(\"Cost model not \"\n+ LOG.warn(\"Cost model not \"\n+ \"implemented yet for: \"+((TernaryOp)current).getOp());\n}\n}\n@@ -849,7 +849,7 @@ public class PlanSelectionFuseCostBased extends PlanSelection\ncase MIN:\ncase MAX: costs = 1; break;\ndefault:\n- throw new RuntimeException(\"Cost model not \"\n+ LOG.warn(\"Cost model not \"\n+ \"implemented yet for: \"+((AggUnaryOp)current).getOp());\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix robustness codegen cost-based plan selection (cost model) |
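The change above softens the codegen cost model: an operator without a cost entry now keeps a conservative default cost of 1 and logs a warning, instead of aborting plan selection with an exception. A minimal sketch of the pattern, using stderr in place of the commons-logging logger and an illustrative enum:

```java
// Graceful-default costing: unknown ops warn and fall back to a unit cost.
public class CostModelSketch {
    enum Op { ABS, SQRT, EXOTIC }

    static double cost(Op op) {
        double costs = 1;                      // conservative default
        switch (op) {
            case ABS:  costs = 1; break;
            case SQRT: costs = 2; break;
            default:
                System.err.println("Cost model not implemented yet for: " + op);
        }
        return costs;
    }

    public static void main(String[] args) {
        System.out.println(cost(Op.EXOTIC));   // 1.0 plus a warning, no exception
    }
}
```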
49,717 | 30.04.2017 21:45:21 | 25,200 | e8fbc753988dc94e97a8e8b723e22e89483a1fc6 | Initial implementation of "solve" for GPU
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/BinaryOp.java",
"diff": "@@ -592,7 +592,7 @@ public class BinaryOp extends Hop\nif ( et == ExecType.CP )\n{\nif(DMLScript.USE_ACCELERATOR && (DMLScript.FORCE_ACCELERATOR || getMemEstimate() < OptimizerUtils.GPU_MEMORY_BUDGET)\n- && (op == OpOp2.MULT || op == OpOp2.PLUS || op == OpOp2.MINUS || op == OpOp2.DIV || op == OpOp2.POW)) {\n+ && (op == OpOp2.MULT || op == OpOp2.PLUS || op == OpOp2.MINUS || op == OpOp2.DIV || op == OpOp2.POW || op == OpOp2.SOLVE)) {\net = ExecType.GPU;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java",
"diff": "@@ -23,6 +23,7 @@ import java.util.HashMap;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.instructions.gpu.AggregateBinaryGPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.ArithmeticBinaryGPUInstruction;\n+import org.apache.sysml.runtime.instructions.gpu.BuiltinBinaryGPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.BuiltinUnaryGPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.ConvolutionGPUInstruction;\nimport org.apache.sysml.runtime.instructions.gpu.GPUInstruction;\n@@ -75,6 +76,9 @@ public class GPUInstructionParser extends InstructionParser\nString2GPUInstructionType.put( \"sel+\" , GPUINSTRUCTION_TYPE.BuiltinUnary);\nString2GPUInstructionType.put( \"exp\" , GPUINSTRUCTION_TYPE.BuiltinUnary);\n+ String2GPUInstructionType.put( \"solve\" , GPUINSTRUCTION_TYPE.BuiltinBinary);\n+\n+\n// Aggregate Unary\nString2GPUInstructionType.put( \"ua+\" , GPUINSTRUCTION_TYPE.AggregateUnary); // Sum\nString2GPUInstructionType.put( \"uak+\" , GPUINSTRUCTION_TYPE.AggregateUnary); // Sum\n@@ -133,6 +137,9 @@ public class GPUInstructionParser extends InstructionParser\ncase BuiltinUnary:\nreturn BuiltinUnaryGPUInstruction.parseInstruction(str);\n+ case BuiltinBinary:\n+ return BuiltinBinaryGPUInstruction.parseInstruction(str);\n+\ncase Convolution:\nreturn ConvolutionGPUInstruction.parseInstruction(str);\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/BuiltinBinaryGPUInstruction.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.runtime.instructions.gpu;\n+\n+import org.apache.sysml.parser.Expression;\n+import org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.functionobjects.Builtin;\n+import org.apache.sysml.runtime.functionobjects.ValueFunction;\n+import org.apache.sysml.runtime.instructions.InstructionUtils;\n+import org.apache.sysml.runtime.instructions.cp.CPOperand;\n+import org.apache.sysml.runtime.matrix.operators.BinaryOperator;\n+import org.apache.sysml.runtime.matrix.operators.Operator;\n+\n+public abstract class BuiltinBinaryGPUInstruction extends GPUInstruction {\n+\n+ private int _arity;\n+ CPOperand output;\n+ CPOperand input1, input2;\n+\n+\n+ public BuiltinBinaryGPUInstruction(Operator op, CPOperand input1, CPOperand input2, CPOperand output, String opcode, String istr, int _arity) {\n+ super(op, opcode, istr);\n+ this._arity = _arity;\n+ this.output = output;\n+ this.input1 = input1;\n+ this.input2 = input2;\n+ }\n+\n+ public static BuiltinBinaryGPUInstruction parseInstruction(String str) throws DMLRuntimeException {\n+ CPOperand in1 = new CPOperand(\"\", Expression.ValueType.UNKNOWN, Expression.DataType.UNKNOWN);\n+ CPOperand in2 = new CPOperand(\"\", Expression.ValueType.UNKNOWN, Expression.DataType.UNKNOWN);\n+ CPOperand out = new CPOperand(\"\", Expression.ValueType.UNKNOWN, Expression.DataType.UNKNOWN);\n+\n+ String[] parts = InstructionUtils.getInstructionPartsWithValueType(str);\n+ InstructionUtils.checkNumFields ( parts, 3 );\n+\n+ String opcode = parts[0];\n+ in1.split(parts[1]);\n+ in2.split(parts[2]);\n+ out.split(parts[3]);\n+\n+ // check for valid data type of output\n+ if((in1.getDataType() == Expression.DataType.MATRIX || in2.getDataType() == Expression.DataType.MATRIX) && out.getDataType() != Expression.DataType.MATRIX)\n+ throw new DMLRuntimeException(\"Element-wise matrix operations between variables \" + in1.getName() +\n+ \" and \" + in2.getName() + \" must produce a matrix, which \" + out.getName() + \" is not\");\n+\n+ // Determine appropriate Function Object based on opcode\n+ ValueFunction func = Builtin.getBuiltinFnObject(opcode);\n+\n+ // Only for \"solve\"\n+ if ( in1.getDataType() == Expression.DataType.SCALAR && in2.getDataType() == Expression.DataType.SCALAR )\n+ throw new DMLRuntimeException(\"GPU : Unsupported GPU builtin operations on 2 scalars\");\n+ else if ( in1.getDataType() == Expression.DataType.MATRIX && in2.getDataType() == Expression.DataType.MATRIX )\n+ return new MatrixMatrixBuiltinGPUInstruction(new BinaryOperator(func), in1, in2, out, opcode, str, 2);\n+ else\n+ throw new DMLRuntimeException(\"GPU : Unsupported GPU builtin operations on a matrix and a 
scalar\");\n+\n+\n+ }\n+\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java",
"diff": "@@ -32,7 +32,7 @@ import org.apache.sysml.utils.Statistics;\npublic abstract class GPUInstruction extends Instruction\n{\n- public enum GPUINSTRUCTION_TYPE { AggregateUnary, AggregateBinary, Convolution, MMTSJ, Reorg, ArithmeticBinary, BuiltinUnary, Builtin };\n+ public enum GPUINSTRUCTION_TYPE { AggregateUnary, AggregateBinary, Convolution, MMTSJ, Reorg, ArithmeticBinary, BuiltinUnary, BuiltinBinary, Builtin };\n// Memory/conversions\npublic final static String MISC_TIMER_HOST_TO_DEVICE = \"H2D\"; // time spent in bringing data to gpu (from host)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixMatrixBuiltinGPUInstruction.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.runtime.instructions.gpu;\n+\n+import org.apache.sysml.runtime.DMLRuntimeException;\n+import org.apache.sysml.runtime.controlprogram.caching.MatrixObject;\n+import org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\n+import org.apache.sysml.runtime.instructions.cp.CPOperand;\n+import org.apache.sysml.runtime.matrix.data.LibMatrixCUDA;\n+import org.apache.sysml.runtime.matrix.operators.Operator;\n+import org.apache.sysml.utils.GPUStatistics;\n+\n+\n+public class MatrixMatrixBuiltinGPUInstruction extends BuiltinBinaryGPUInstruction {\n+\n+ public MatrixMatrixBuiltinGPUInstruction(Operator op, CPOperand input1, CPOperand input2, CPOperand output, String opcode, String istr, int _arity) {\n+ super(op, input1, input2, output, opcode, istr, _arity);\n+ _gputype = GPUINSTRUCTION_TYPE.BuiltinUnary;\n+\n+ }\n+\n+ @Override\n+ public void processInstruction(ExecutionContext ec) throws DMLRuntimeException {\n+ GPUStatistics.incrementNoOfExecutedGPUInst();\n+\n+ String opcode = getOpcode();\n+ MatrixObject mat1 = getMatrixInputForGPUInstruction(ec, input1.getName());\n+ MatrixObject mat2 = getMatrixInputForGPUInstruction(ec, input2.getName());\n+\n+ if(opcode.equals(\"solve\")) {\n+ LibMatrixCUDA.solve(ec, ec.getGPUContext(), getExtendedOpcode(), mat1, mat2, output.getName());\n+\n+ } else {\n+ throw new DMLRuntimeException(\"Unsupported GPU operator:\" + opcode);\n+ }\n+ ec.releaseMatrixInputForGPUInstruction(input1.getName());\n+ ec.releaseMatrixInputForGPUInstruction(input2.getName());\n+ ec.releaseMatrixOutputForGPUInstruction(output.getName());\n+ }\n+\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java",
"diff": "@@ -29,6 +29,7 @@ import static jcuda.jcusparse.JCusparse.cusparseXcsrgemmNnz;\nimport static jcuda.jcusparse.cusparseIndexBase.CUSPARSE_INDEX_BASE_ZERO;\nimport static jcuda.jcusparse.cusparseMatrixType.CUSPARSE_MATRIX_TYPE_GENERAL;\nimport static jcuda.runtime.JCuda.cudaMemcpy;\n+import static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToDevice;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice;\n@@ -39,6 +40,7 @@ import org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.utils.GPUStatistics;\nimport jcuda.Pointer;\n+import jcuda.Sizeof;\nimport jcuda.jcublas.cublasHandle;\nimport jcuda.jcusparse.cusparseHandle;\nimport jcuda.jcusparse.cusparseMatDescr;\n@@ -52,11 +54,11 @@ public class CSRPointer {\nprivate static final Log LOG = LogFactory.getLog(CSRPointer.class.getName());\n+ private static final double ULTRA_SPARSITY_TURN_POINT = 0.0004;\n+\n/** {@link GPUContext} instance to track the GPU to do work on */\nprivate final GPUContext gpuContext;\n- private static final double ULTRA_SPARSITY_TURN_POINT = 0.0004;\n-\npublic static cusparseMatDescr matrixDescriptor;\n/** Number of non zeroes */\n@@ -74,6 +76,27 @@ public class CSRPointer {\n/** descriptor of matrix, only CUSPARSE_MATRIX_TYPE_GENERAL supported */\npublic cusparseMatDescr descr;\n+\n+ public CSRPointer clone(int rows) throws DMLRuntimeException {\n+ CSRPointer me = this;\n+ CSRPointer that = new CSRPointer(me.getGPUContext());\n+\n+ that.allocateMatDescrPointer();\n+ long totalSize = estimateSize(me.nnz, rows);\n+ that.gpuContext.ensureFreeSpace(totalSize);\n+\n+ that.nnz = me.nnz;\n+ that.val = allocate(that.nnz * Sizeof.DOUBLE);\n+ that.rowPtr = allocate(rows * Sizeof.DOUBLE);\n+ that.colInd = allocate(that.nnz * Sizeof.DOUBLE);\n+\n+ cudaMemcpy(that.val, me.val, that.nnz * Sizeof.DOUBLE, cudaMemcpyDeviceToDevice);\n+ cudaMemcpy(that.rowPtr, me.rowPtr, rows * Sizeof.DOUBLE, cudaMemcpyDeviceToDevice);\n+ cudaMemcpy(that.colInd, me.colInd, that.nnz * Sizeof.DOUBLE, cudaMemcpyDeviceToDevice);\n+\n+ return that;\n+ }\n+\n/**\n* Default constructor to help with Factory method {@link #allocateEmpty(GPUContext, long, long)}\n* @param gCtx a valid {@link GPUContext}\n@@ -114,7 +137,7 @@ public class CSRPointer {\nreturn numElems * ((long)jcuda.Sizeof.INT);\n}\n- private GPUContext getGPUContext() throws DMLRuntimeException {\n+ private GPUContext getGPUContext() {\nreturn gpuContext;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java",
"diff": "@@ -22,8 +22,13 @@ import static jcuda.jcublas.JCublas2.cublasCreate;\nimport static jcuda.jcublas.JCublas2.cublasDestroy;\nimport static jcuda.jcudnn.JCudnn.cudnnCreate;\nimport static jcuda.jcudnn.JCudnn.cudnnDestroy;\n+import static jcuda.jcusolver.JCusolverDn.cusolverDnDestroy;\n+import static jcuda.jcusolver.JCusolverSp.cusolverSpDestroy;\nimport static jcuda.jcusparse.JCusparse.cusparseCreate;\nimport static jcuda.jcusparse.JCusparse.cusparseDestroy;\n+import static jcuda.jcusolver.JCusolverDn.cusolverDnCreate;\n+import static jcuda.jcusolver.JCusolverSp.cusolverSpCreate;\n+\nimport static jcuda.runtime.JCuda.cudaDeviceScheduleBlockingSync;\nimport static jcuda.runtime.JCuda.cudaFree;\nimport static jcuda.runtime.JCuda.cudaGetDeviceCount;\n@@ -54,6 +59,8 @@ import org.apache.sysml.utils.LRUCacheMap;\nimport jcuda.Pointer;\nimport jcuda.jcublas.cublasHandle;\nimport jcuda.jcudnn.cudnnHandle;\n+import jcuda.jcusolver.cusolverDnHandle;\n+import jcuda.jcusolver.cusolverSpHandle;\nimport jcuda.jcusparse.cusparseHandle;\nimport jcuda.runtime.JCuda;\nimport jcuda.runtime.cudaDeviceProp;\n@@ -90,15 +97,21 @@ public class GPUContext {\n* so that an extraneous host to dev transfer can be avoided */\nprivate ArrayList<GPUObject> allocatedGPUObjects = new ArrayList<>();\n- /** cudnnHandle specific to the active GPU for this GPUContext */\n+ /** cudnnHandle for Deep Neural Network operations on the GPU */\nprivate cudnnHandle cudnnHandle;\n- /** cublasHandle specific to the active GPU for this GPUContext */\n+ /** cublasHandle for BLAS operations on the GPU */\nprivate cublasHandle cublasHandle;\n- /** cusparseHandle specific to the active GPU for this GPUContext */\n+ /** cusparseHandle for certain sparse BLAS operations on the GPU */\nprivate cusparseHandle cusparseHandle;\n+ /** cusolverDnHandle for invoking solve() function on dense matrices on the GPU */\n+ private cusolverDnHandle cusolverDnHandle;\n+\n+ /** cusolverSpHandle for invoking solve() function on sparse matrices on the GPU */\n+ private cusolverSpHandle cusolverSpHandle;\n+\n/** to launch custom CUDA kernel, specific to the active GPU for this GPUContext */\nprivate JCudaKernels kernels;\n@@ -133,6 +146,12 @@ public class GPUContext {\n// cublasSetPointerMode(LibMatrixCUDA.cublasHandle, cublasPointerMode.CUBLAS_POINTER_MODE_DEVICE);\ncusparseHandle = new cusparseHandle();\ncusparseCreate(cusparseHandle);\n+\n+ cusolverDnHandle = new cusolverDnHandle();\n+ cusolverDnCreate(cusolverDnHandle);\n+ cusolverSpHandle = new cusolverSpHandle();\n+ cusolverSpCreate(cusolverSpHandle);\n+\nkernels = new JCudaKernels(deviceNum);\nGPUStatistics.cudaLibrariesInitTime = System.nanoTime() - start;\n@@ -553,6 +572,14 @@ public class GPUContext {\nreturn cusparseHandle;\n}\n+ public cusolverDnHandle getCusolverDnHandle() {\n+ return cusolverDnHandle;\n+ }\n+\n+ public cusolverSpHandle getCusolverSpHandle() {\n+ return cusolverSpHandle;\n+ }\n+\npublic JCudaKernels getKernels() {\nreturn kernels;\n}\n@@ -569,6 +596,8 @@ public class GPUContext {\ncudnnDestroy(cudnnHandle);\ncublasDestroy(cublasHandle);\ncusparseDestroy(cusparseHandle);\n+ cusolverDnDestroy(cusolverDnHandle);\n+ cusolverSpDestroy(cusolverSpHandle);\ncudnnHandle = null;\ncublasHandle = null;\ncusparseHandle = null;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java",
"diff": "@@ -26,7 +26,9 @@ import static jcuda.jcudnn.cudnnDataType.CUDNN_DATA_DOUBLE;\nimport static jcuda.jcudnn.cudnnTensorFormat.CUDNN_TENSOR_NCHW;\nimport static jcuda.jcusparse.JCusparse.cusparseDdense2csr;\nimport static jcuda.jcusparse.JCusparse.cusparseDnnz;\n+import static jcuda.runtime.JCuda.cudaMalloc;\nimport static jcuda.runtime.JCuda.cudaMemcpy;\n+import static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToDevice;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice;\n@@ -50,6 +52,7 @@ import org.apache.sysml.runtime.matrix.data.SparseBlockMCSR;\nimport org.apache.sysml.utils.GPUStatistics;\nimport jcuda.Pointer;\n+import jcuda.Sizeof;\nimport jcuda.jcublas.JCublas2;\nimport jcuda.jcudnn.cudnnTensorDescriptor;\nimport jcuda.jcusparse.JCusparse;\n@@ -100,6 +103,43 @@ public class GPUObject {\n// return getGPUContext().allocate(instName, size);\n// }\n+ @Override\n+ public Object clone() {\n+ GPUObject me = this;\n+ GPUObject that = new GPUObject(me.gpuContext, me.mat);\n+ if (me.tensorShape != null) {\n+ that.tensorShape = new int[me.tensorShape.length];\n+ System.arraycopy(me.tensorShape, 0, that.tensorShape, 0, me.tensorShape.length);\n+ that.allocateTensorDescriptor(me.tensorShape[0], me.tensorShape[1], me.tensorShape[2], me.tensorShape[3]);\n+ }\n+ that.dirty = me.dirty;\n+ that.readLocks = new AtomicInteger(me.readLocks.get());\n+ that.timestamp = new AtomicLong(me.timestamp.get());\n+ that.isSparse = me.isSparse;\n+\n+ try {\n+ if (me.jcudaDenseMatrixPtr != null) {\n+ long rows = me.mat.getNumRows();\n+ long cols = me.mat.getNumColumns();\n+ long size = rows * cols * Sizeof.DOUBLE;\n+ me.gpuContext.ensureFreeSpace((int)size);\n+ that.jcudaDenseMatrixPtr = allocate(size);\n+ cudaMemcpy(that.jcudaDenseMatrixPtr, me.jcudaDenseMatrixPtr, size, cudaMemcpyDeviceToDevice);\n+ }\n+\n+ if (me.jcudaSparseMatrixPtr != null){\n+ long rows = mat.getNumRows();\n+ that.jcudaSparseMatrixPtr = me.jcudaSparseMatrixPtr.clone((int)rows);\n+ }\n+\n+\n+ } catch (DMLRuntimeException e){\n+ throw new RuntimeException(e);\n+ }\n+\n+ return that;\n+ }\n+\nprivate Pointer allocate(long size) throws DMLRuntimeException {\nreturn getGPUContext().allocate(size);\n}\n@@ -116,7 +156,7 @@ public class GPUObject {\ngetGPUContext().cudaFreeHelper(instName, toFree, eager);\n}\n- private GPUContext getGPUContext() throws DMLRuntimeException {\n+ private GPUContext getGPUContext() {\nreturn gpuContext;\n}\n@@ -275,7 +315,7 @@ public class GPUObject {\nif(getJcudaDenseMatrixPtr() == null || !isAllocated())\nthrow new DMLRuntimeException(\"Expected allocated dense matrix before denseToSparse() call\");\n- convertDensePtrFromRowMajorToColumnMajor();\n+ denseRowMajorToColumnMajor();\nsetSparseMatrixCudaPointer(columnMajorDenseToRowMajorSparse(getGPUContext(), cusparseHandle, getJcudaDenseMatrixPtr(), rows, cols));\n// TODO: What if mat.getNnz() is -1 ?\nif (DMLScript.STATISTICS) GPUStatistics.cudaDenseToSparseTime.addAndGet(System.nanoTime() - t0);\n@@ -283,10 +323,10 @@ public class GPUObject {\n}\n/**\n- * Convenience method. Converts Row Major Dense Matrix --> Column Major Dense Matrix\n+ * Convenience method. 
Converts Row Major Dense Matrix to Column Major Dense Matrix\n* @throws DMLRuntimeException if DMLRuntimeException occurs\n*/\n- private void convertDensePtrFromRowMajorToColumnMajor() throws DMLRuntimeException {\n+ public void denseRowMajorToColumnMajor() throws DMLRuntimeException {\nLOG.trace(\"GPU : dense Ptr row-major -> col-major on \" + this + \", GPUContext=\" + getGPUContext());\nint m = toIntExact(mat.getNumRows());\nint n = toIntExact(mat.getNumColumns());\n@@ -301,7 +341,11 @@ public class GPUObject {\nsetDenseMatrixCudaPointer(tmp);\n}\n- private void convertDensePtrFromColMajorToRowMajor() throws DMLRuntimeException {\n+ /**\n+ * Convenience method. Converts Column Major Dense Matrix to Row Major Dense Matrix\n+ * @throws DMLRuntimeException\n+ */\n+ public void denseColumnMajorToRowMajor() throws DMLRuntimeException {\nLOG.trace(\"GPU : dense Ptr row-major -> col-major on \" + this + \", GPUContext=\" + getGPUContext());\nint n = toIntExact(mat.getNumRows());\n@@ -340,7 +384,7 @@ public class GPUObject {\nthrow new DMLRuntimeException(\"Expected allocated sparse matrix before sparseToDense() call\");\nsparseToColumnMajorDense();\n- convertDensePtrFromColMajorToRowMajor();\n+ denseColumnMajorToRowMajor();\nif (DMLScript.STATISTICS) end = System.nanoTime();\nif (instructionName != null && GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instructionName, GPUInstruction.MISC_TIMER_SPARSE_TO_DENSE, end - start);\nif (DMLScript.STATISTICS) GPUStatistics.cudaSparseToDenseTime.addAndGet(end - start);\n@@ -431,17 +475,12 @@ public class GPUObject {\n}\npublic boolean isInputAllocated() {\n- try {\nboolean eitherAllocated = (getJcudaDenseMatrixPtr() != null || getJcudaSparseMatrixPtr() != null);\nboolean isAllocatedOnThisGPUContext = getGPUContext().isBlockRecorded(this);\nif (eitherAllocated && !isAllocatedOnThisGPUContext) {\nLOG.warn(\"GPU : A block was allocated but was not on this GPUContext, GPUContext=\" + getGPUContext());\n}\nreturn eitherAllocated && isAllocatedOnThisGPUContext;\n- } catch (DMLRuntimeException e){\n- LOG.info(\"GPU : System is in an inconsistent state\");\n- throw new RuntimeException(e);\n- }\n}\n/**\n@@ -863,5 +902,4 @@ public class GPUObject {\nreturn sb.toString();\n}\n-\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"diff": "@@ -95,10 +95,10 @@ import org.apache.sysml.runtime.functionobjects.ReduceRow;\nimport org.apache.sysml.runtime.functionobjects.ValueFunction;\nimport org.apache.sysml.runtime.instructions.cp.DoubleObject;\nimport org.apache.sysml.runtime.instructions.gpu.GPUInstruction;\n+import org.apache.sysml.runtime.instructions.gpu.context.CSRPointer;\nimport org.apache.sysml.runtime.instructions.gpu.context.ExecutionConfig;\nimport org.apache.sysml.runtime.instructions.gpu.context.GPUContext;\nimport org.apache.sysml.runtime.instructions.gpu.context.GPUObject;\n-import org.apache.sysml.runtime.instructions.gpu.context.CSRPointer;\nimport org.apache.sysml.runtime.instructions.gpu.context.JCudaKernels;\nimport org.apache.sysml.runtime.matrix.operators.AggregateOperator;\nimport org.apache.sysml.runtime.matrix.operators.AggregateUnaryOperator;\n@@ -114,9 +114,11 @@ import jcuda.CudaException;\nimport jcuda.Pointer;\nimport jcuda.Sizeof;\nimport jcuda.jcublas.JCublas2;\n+import jcuda.jcublas.cublasDiagType;\nimport jcuda.jcublas.cublasFillMode;\nimport jcuda.jcublas.cublasHandle;\nimport jcuda.jcublas.cublasOperation;\n+import jcuda.jcublas.cublasSideMode;\nimport jcuda.jcudnn.cudnnActivationDescriptor;\nimport jcuda.jcudnn.cudnnBatchNormMode;\nimport jcuda.jcudnn.cudnnConvolutionDescriptor;\n@@ -126,6 +128,7 @@ import jcuda.jcudnn.cudnnHandle;\nimport jcuda.jcudnn.cudnnPoolingDescriptor;\nimport jcuda.jcudnn.cudnnStatus;\nimport jcuda.jcudnn.cudnnTensorDescriptor;\n+import jcuda.jcusolver.JCusolverDn;\nimport jcuda.jcusparse.JCusparse;\nimport jcuda.jcusparse.cusparseHandle;\n@@ -306,15 +309,31 @@ public class LibMatrixCUDA {\n/**\n* Convenience method to get jcudaDenseMatrixPtr. This method explicitly converts sparse to dense format, so use it judiciously.\n* @param gCtx a valid {@link GPUContext}\n- * @param image input matrix object\n+ * @param input input matrix object\n+ * @param instName the invoking instruction's name for record {@link Statistics}.\n* @return jcuda pointer\n* @throws DMLRuntimeException if error occurs while sparse to dense conversion\n*/\n- private static Pointer getDensePointer(GPUContext gCtx, MatrixObject image, String instName) throws DMLRuntimeException {\n- if(isInSparseFormat(gCtx, image)) {\n- image.getGPUObject(gCtx).sparseToDense(instName);\n+ private static Pointer getDensePointer(GPUContext gCtx, MatrixObject input, String instName) throws DMLRuntimeException {\n+ if(isInSparseFormat(gCtx, input)) {\n+ input.getGPUObject(gCtx).sparseToDense(instName);\n+ }\n+ return input.getGPUObject(gCtx).getJcudaDenseMatrixPtr();\n+ }\n+\n+ /**\n+ * Convenience method to get the sparse matrix pointer from a {@link MatrixObject}. 
Converts dense to sparse if necessary.\n+ * @param gCtx a valid {@link GPUContext}\n+ * @param input input matrix\n+ * @param instName the invoking instruction's name for record {@link Statistics}.\n+ * @return a sparse matrix pointer\n+ * @throws DMLRuntimeException if error occurs\n+ */\n+ private static CSRPointer getSparsePointer(GPUContext gCtx, MatrixObject input, String instName) throws DMLRuntimeException {\n+ if(!isInSparseFormat(gCtx, input)) {\n+ input.getGPUObject(gCtx).denseToSparse();\n}\n- return image.getGPUObject(gCtx).getJcudaDenseMatrixPtr();\n+ return input.getGPUObject(gCtx).getJcudaSparseMatrixPtr();\n}\n/**\n@@ -2927,6 +2946,108 @@ public class LibMatrixCUDA {\n}\n}\n+\n+ /**\n+ * Implements the \"solve\" function for systemml Ax = B (A is of size m*n, B is of size m*1, x is of size n*1)\n+ *\n+ * @param ec a valid {@link ExecutionContext}\n+ * @param gCtx a valid {@link GPUContext}\n+ * @param instName the invoking instruction's name for record {@link Statistics}.\n+ * @param in1 input matrix A\n+ * @param in2 input matrix B\n+ * @param outputName name of the output matrix\n+ * @throws DMLRuntimeException if an error occurs\n+ */\n+ public static void solve(ExecutionContext ec, GPUContext gCtx, String instName, MatrixObject in1, MatrixObject in2, String outputName) throws DMLRuntimeException {\n+ if (ec.getGPUContext() != gCtx)\n+ throw new DMLRuntimeException(\"GPU : Invalid internal state, the GPUContext set with the ExecutionContext is not the same used to run this LibMatrixCUDA function\");\n+\n+ // x = solve(A, b)\n+\n+ // Both Sparse\n+ if (!isInSparseFormat(gCtx, in1) && !isInSparseFormat(gCtx, in2)) { // Both dense\n+ GPUObject Aobj = in1.getGPUObject(gCtx);\n+ GPUObject bobj = in2.getGPUObject(gCtx);\n+ int m = (int) in1.getNumRows();\n+ int n = (int) in1.getNumColumns();\n+ if ((int) in2.getNumRows() != m)\n+ throw new DMLRuntimeException(\"GPU : Incorrect input for solve(), rows in A should be the same as rows in B\");\n+ if ((int) in2.getNumColumns() != 1)\n+ throw new DMLRuntimeException(\"GPU : Incorrect input for solve(), columns in B should be 1\");\n+\n+\n+ // Copy over matrices and\n+ // convert dense matrices to row major\n+ // Operation in cuSolver and cuBlas are for column major dense matrices\n+ // and are destructive to the original input\n+ GPUObject ATobj = (GPUObject) Aobj.clone();\n+ ATobj.denseRowMajorToColumnMajor();\n+ Pointer A = ATobj.getJcudaDenseMatrixPtr();\n+\n+ GPUObject bTobj = (GPUObject) bobj.clone();\n+ bTobj.denseRowMajorToColumnMajor();\n+ Pointer b = bTobj.getJcudaDenseMatrixPtr();\n+\n+ // The following set of operations is done following the example in the cusolver documentation\n+ // http://docs.nvidia.com/cuda/cusolver/#ormqr-example1\n+\n+ // step 3: query working space of geqrf and ormqr\n+ int[] lwork = {0};\n+ JCusolverDn.cusolverDnDgeqrf_bufferSize(gCtx.getCusolverDnHandle(), m, n, A, m, lwork);\n+\n+ // step 4: compute QR factorization\n+ Pointer work = gCtx.allocate(lwork[0] * Sizeof.DOUBLE);\n+ Pointer tau = gCtx.allocate(Math.max(m, m) * Sizeof.DOUBLE);\n+ Pointer devInfo = gCtx.allocate(Sizeof.INT);\n+ JCusolverDn.cusolverDnDgeqrf(gCtx.getCusolverDnHandle(), m, n, A, m, tau, work, lwork[0], devInfo);\n+\n+ int[] qrError = {-1};\n+ cudaMemcpy(Pointer.to(qrError), devInfo, Sizeof.INT, cudaMemcpyDeviceToHost);\n+ if (qrError[0] != 0) {\n+ throw new DMLRuntimeException(\"GPU : Error in call to geqrf (QR factorization) as part of solve, argument \" + qrError[0] + \" was wrong\");\n+ }\n+\n+ // step 5: compute 
Q^T*B\n+ JCusolverDn.cusolverDnDormqr(gCtx.getCusolverDnHandle(), cublasSideMode.CUBLAS_SIDE_LEFT, cublasOperation.CUBLAS_OP_T, m, 1, n, A, m, tau, b, m, work, lwork[0], devInfo);\n+ cudaMemcpy(Pointer.to(qrError), devInfo, Sizeof.INT, cudaMemcpyDeviceToHost);\n+ if (qrError[0] != 0) {\n+ throw new DMLRuntimeException(\"GPU : Error in call to ormqr (to compuete Q^T*B after QR factorization) as part of solve, argument \" + qrError[0] + \" was wrong\");\n+ }\n+\n+ // step 6: compute x = R \\ Q^T*B\n+ JCublas2.cublasDtrsm(gCtx.getCublasHandle(),\n+ cublasSideMode.CUBLAS_SIDE_LEFT, cublasFillMode.CUBLAS_FILL_MODE_UPPER, cublasOperation.CUBLAS_OP_N, cublasDiagType.CUBLAS_DIAG_NON_UNIT,\n+ n, 1, pointerTo(1.0), A, m, b, m);\n+\n+ bTobj.denseColumnMajorToRowMajor();\n+\n+ // TODO : Find a way to assign bTobj directly to the output and set the correct flags so as to not crash\n+ // There is an avoidable copy happening here\n+ MatrixObject out = getDenseMatrixOutputForGPUInstruction(ec, instName, outputName);\n+ cudaMemcpy(out.getGPUObject(gCtx).getJcudaDenseMatrixPtr(), bTobj.getJcudaDenseMatrixPtr(), n * 1 * Sizeof.DOUBLE, cudaMemcpyDeviceToDevice);\n+\n+ gCtx.cudaFreeHelper(work);\n+ gCtx.cudaFreeHelper(tau);\n+ gCtx.cudaFreeHelper(tau);\n+ ATobj.clearData();\n+ bTobj.clearData();\n+\n+ //debugPrintMatrix(b, n, 1);\n+\n+\n+ } else if (isInSparseFormat(gCtx, in1) && isInSparseFormat(gCtx, in2)) { // Both sparse\n+ throw new DMLRuntimeException(\"GPU : solve on sparse inputs not supported\");\n+ } else if (!isInSparseFormat(gCtx, in1) && isInSparseFormat(gCtx, in2)) { // A is dense, b is sparse\n+ // Pointer A = getDensePointer(gCtx, in1, instName);\n+ // Pointer B = getDensePointer(gCtx, in2, instName);\n+ throw new DMLRuntimeException(\"GPU : solve on sparse inputs not supported\");\n+ } else if (isInSparseFormat(gCtx, in1) && !isInSparseFormat(gCtx, in2)) { // A is sparse, b is dense\n+ throw new DMLRuntimeException(\"GPU : solve on sparse inputs not supported\");\n+ }\n+\n+\n+ }\n+\n//********************************************************************/\n//***************** END OF Builtin Functions ************************/\n//********************************************************************/\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1034] Initial implementation of "solve" for GPU
Closes #476 |
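The dense path in the solve commit above reduces least squares to an upper-triangular system: after `geqrf`/`ormqr` produce A = QR and overwrite b with Q^T b, the final `cublasDtrsm` call performs the triangular solve on the GPU,

$$x = R^{-1} Q^T b \quad\text{obtained by back-substituting}\quad R\,x = Q^T b.$$

A minimal host-side sketch of that last step on plain Java arrays — illustrative only, not the SystemML or JCuda API:

```java
public class BackSubstitution {
    /** Solves R x = y for an upper-triangular R (n x n, row-major). */
    static double[] backSolve(double[][] R, double[] y) {
        int n = y.length;
        double[] x = new double[n];
        for (int i = n - 1; i >= 0; i--) {
            double s = y[i];
            for (int j = i + 1; j < n; j++)
                s -= R[i][j] * x[j];   // subtract already-solved unknowns
            x[i] = s / R[i][i];        // assumes full rank, no zero pivot
        }
        return x;
    }

    public static void main(String[] args) {
        double[][] R = {{2, 1}, {0, 3}};
        double[] y = {5, 6};           // stands in for Q^T b
        double[] x = backSolve(R, y);  // expected: x = {1.5, 2.0}
        System.out.println(x[0] + " " + x[1]);
    }
}
```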
49,761 | 01.05.2017 11:50:23 | 25,200 | 5aa6fb75b066228d723dfc88bc0ae24551812e92 | New Jupyter Python Notebook to showcase SystemML 2-layer autoencoder for acoustic signal modeling
Closes | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "samples/jupyter-notebooks/Autoencoder.ipynb",
"diff": "+{\n+ \"cells\": [\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Autoencoder\\n\",\n+ \"This notebook demonstrates the invocation of the SystemML autoencoder script, and alternative ways of passing in/out data.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"#!pip install --user systemml>0.13.0\\n\",\n+ \"!pip show systemml\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"import pandas as pd\\n\",\n+ \"from systemml import MLContext, dml\\n\",\n+ \"ml = MLContext(sc)\\n\",\n+ \"print(ml.info())\\n\",\n+ \"sc.version\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"## SystemML Read/Write data from local file system\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"FsPath = \\\"/tmp/data/\\\"\\n\",\n+ \"inp = FsPath + \\\"Input/\\\"\\n\",\n+ \"outp = FsPath + \\\"Output/\\\"\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Generate Data and write out to file.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"X_pd = pd.DataFrame(range(1, 2001,1),dtype=float).values.reshape(100,20)\\n\",\n+ \"script =\\\"\\\"\\\"\\n\",\n+ \" write(X, $Xfile)\\n\",\n+ \"\\\"\\\"\\\"\\n\",\n+ \"prog = dml(script).input(X=X_pd).input(**{\\\"$Xfile\\\":inp+\\\"X.csv\\\"})\\n\",\n+ \"ml.execute(prog)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"!ls -l /tmp/data/Input\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"autoencoderURL = \\\"https://raw.githubusercontent.com/apache/incubator-systemml/master/scripts/staging/autoencoder-2layer.dml\\\"\\n\",\n+ \"rets = (\\\"iter\\\", \\\"num_iters_per_epoch\\\", \\\"beg\\\", \\\"end\\\", \\\"o\\\")\\n\",\n+ \"\\n\",\n+ \"prog = dml(autoencoderURL).input(**{\\\"$X\\\":inp+\\\"X.csv\\\"}) \\\\\\n\",\n+ \" .input(**{\\\"$H1\\\":500, \\\"$H2\\\":2, \\\"$BATCH\\\":36, \\\"$EPOCH\\\":5 \\\\\\n\",\n+ \" , \\\"$W1_out\\\":outp+\\\"W1_out\\\", \\\"$b1_out\\\":outp+\\\"b1_out\\\" \\\\\\n\",\n+ \" , \\\"$W2_out\\\":outp+\\\"W2_out\\\", \\\"$b2_out\\\":outp+\\\"b2_out\\\" \\\\\\n\",\n+ \" , \\\"$W3_out\\\":outp+\\\"W3_out\\\", \\\"$b3_out\\\":outp+\\\"b3_out\\\" \\\\\\n\",\n+ \" , \\\"$W4_out\\\":outp+\\\"W4_out\\\", \\\"$b4_out\\\":outp+\\\"b4_out\\\" \\\\\\n\",\n+ \" }).output(*rets)\\n\",\n+ \"iter, num_iters_per_epoch, beg, end, o = ml.execute(prog).get(*rets)\\n\",\n+ \"print (iter, num_iters_per_epoch, beg, end, o)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"!ls -l /tmp/data/Output\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"## Alternatively to passing in/out file names, use Python variables.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": 
{\n+ \"collapsed\": false,\n+ \"scrolled\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"autoencoderURL = \\\"https://raw.githubusercontent.com/apache/incubator-systemml/master/scripts/staging/autoencoder-2layer.dml\\\"\\n\",\n+ \"rets = (\\\"iter\\\", \\\"num_iters_per_epoch\\\", \\\"beg\\\", \\\"end\\\", \\\"o\\\")\\n\",\n+ \"rets2 = (\\\"W1\\\", \\\"b1\\\", \\\"W2\\\", \\\"b2\\\", \\\"W3\\\", \\\"b3\\\", \\\"W4\\\", \\\"b4\\\")\\n\",\n+ \"\\n\",\n+ \"prog = dml(autoencoderURL).input(X=X_pd) \\\\\\n\",\n+ \" .input(**{ \\\"$H1\\\":500, \\\"$H2\\\":2, \\\"$BATCH\\\":36, \\\"$EPOCH\\\":5}) \\\\\\n\",\n+ \" .output(*rets) \\\\\\n\",\n+ \" .output(*rets2)\\n\",\n+ \"result = ml.execute(prog)\\n\",\n+ \"iter, num_iters_per_epoch, beg, end, o = result.get(*rets)\\n\",\n+ \"W1, b1, W2, b2, W3, b3, W4, b4 = result.get(*rets2)\\n\",\n+ \"\\n\",\n+ \"print (iter, num_iters_per_epoch, beg, end, o)\\n\",\n+ \"W1.toDF().head(3)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": []\n+ }\n+ ],\n+ \"metadata\": {\n+ \"kernelspec\": {\n+ \"display_name\": \"Python 2\",\n+ \"language\": \"python\",\n+ \"name\": \"python2\"\n+ },\n+ \"language_info\": {\n+ \"codemirror_mode\": {\n+ \"name\": \"ipython\",\n+ \"version\": 2\n+ },\n+ \"file_extension\": \".py\",\n+ \"mimetype\": \"text/x-python\",\n+ \"name\": \"python\",\n+ \"nbconvert_exporter\": \"python\",\n+ \"pygments_lexer\": \"ipython2\",\n+ \"version\": \"2.7.11\"\n+ }\n+ },\n+ \"nbformat\": 4,\n+ \"nbformat_minor\": 0\n+}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1571] New Jupyter Python Notebook to showcase SystemML 2-layer autoencoder for acoustic signal modeling
Closes #478. |
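The notebook in the commit above drives SystemML through the Python `MLContext` API; the same bind-inputs/bind-outputs pattern is available on the JVM side. A rough Java sketch of the equivalent calls — the inline DML and all variable names here are illustrative stand-ins, not the autoencoder script itself:

```java
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.sysml.api.mlcontext.MLContext;
import org.apache.sysml.api.mlcontext.MLResults;
import org.apache.sysml.api.mlcontext.Script;
import static org.apache.sysml.api.mlcontext.ScriptFactory.dml;

public class MLContextSketch {
    public static void run(JavaSparkContext jsc) {
        MLContext ml = new MLContext(jsc);
        // bind a $-parameter in, declare an output, execute, read it back
        Script script = dml("s = sum(seq(1, $n))")
                .in("$n", 100)
                .out("s");
        MLResults results = ml.execute(script);
        System.out.println("sum = " + results.getDouble("s")); // 5050.0
    }
}
```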
49,728 | 01.05.2017 12:02:27 | 25,200 | 7ce130855d66d94b79f1dabcb4eb584eccc9663c | Scalable linear algebra
DML implementations and accompanying DML tests for
Cholesky, LU, QR, Solve, Inverse and util functions.
Closes | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/cholesky.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+source(\"scalable_linalg/triangular_inv.dml\") as inv\n+\n+Choleskey = function(Matrix[double] A, int nb)\n+ return(Matrix[double] L) {\n+ n = ncol(A)\n+\n+ if (n <= nb) {\n+ L = cholesky(A)\n+ } else {\n+ k = as.integer(floor(n/2))\n+ A11 = A[1:k,1:k]\n+ A21 = A[k+1:n,1:k]\n+ A22 = A[k+1:n,k+1:n]\n+\n+ L11 = Choleskey(A11, nb)\n+ L11inv = inv::U_triangular_inv(t(L11))\n+ L21 = A21 %*% L11inv\n+ A22 = A22 - L21 %*% t(L21)\n+ L22 = Choleskey(A22, nb)\n+ L12 = matrix(0, rows=nrow(L11), cols=ncol(L22))\n+\n+ L = rbind(cbind(L11, L12), cbind(L21, L22))\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/inverse.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+source(\"scalable_linalg/triangular_inv.dml\") as tinv\n+source(\"scalable_linalg/qr.dml\") as decomp\n+\n+Inverse = function(Matrix[double] A, int nb)\n+ return(Matrix[double] X) {\n+ # TODO: Some recent papers discuss Block-Recursive algorithm for\n+ # matrix inverse which can be explored instead of QR decomposition\n+\n+ [Q, R] = decomp::QR(A, nb)\n+\n+ Rinv = tinv::U_triangular_inv(R)\n+\n+ X = R %*% t(Q)\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/lu.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+source(\"scalable_linalg/triangular_inv.dml\") as inv\n+\n+LU = function(Matrix[double] A, int nb)\n+ return(Matrix[double] P, Matrix[double] L, Matrix[double] U) {\n+ n = ncol(A)\n+ if (n <= nb) {\n+ [P, L, U] = lu(A)\n+ } else {\n+ k = as.integer(floor(n/2))\n+ A11 = A[1:k,1:k]\n+ A12 = A[1:k,k+1:n]\n+ A21 = A[k+1:n,1:k]\n+ A22 = A[k+1:n,k+1:n]\n+\n+ [P11, L11, U11] = LU(A11, nb)\n+ L11inv = inv::L_triangular_inv(L11)\n+ U11inv = inv::U_triangular_inv(U11)\n+ U12 = L11inv %*% (P11 %*% A12)\n+ A22 = A22 - A21 %*% U11inv %*% U12\n+ [P22, L22, U22] = LU(A22, nb)\n+ L21 = P22 %*% A21 %*% U11inv\n+\n+ Z12 = matrix(0, rows=nrow(A11), cols=ncol(A22))\n+ Z21 = matrix(0, rows=nrow(A22), cols=ncol(A11))\n+\n+ L = rbind(cbind(L11, Z12), cbind(L21, L22))\n+ U = rbind(cbind(U11, U12), cbind(Z21, U22))\n+ P = rbind(cbind(P11, Z12), cbind(Z21, P22))\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/qr.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# calculate Q from Housholder matrix\n+QfromH = function(Matrix[double] H)\n+ return(Matrix[double] Q) {\n+ m = nrow(H);\n+ n = ncol(H);\n+ ones = matrix(1, m, 1);\n+ eye = diag(ones);\n+ Q = eye[,1:n];\n+\n+ for (j in n:1) {\n+ v = H[j:m,j]\n+ b = as.scalar(2/(t(v) %*% v))\n+ Q[j:m, j:n] = Q[j:m, j:n] - (b * v) %*% (t(v) %*% Q[j:m, j:n])\n+ }\n+}\n+\n+QR = function(Matrix[double] A, int nb)\n+ return(Matrix[double] Q, Matrix[double] R) {\n+ n = ncol(A)\n+\n+ if (n <= nb) {\n+ [H, R] = qr(A)\n+ Q = QfromH(H)\n+ R = R[1:n, 1:n]\n+ }\n+ else {\n+ k = floor(n/2)\n+ A1 = A[,1:k]\n+ A2 = A[,k+1:n]\n+\n+ [Q1, R11] = QR(A1, nb)\n+ R12 = t(Q1) %*% A2\n+ A2 = A2 - Q1 %*% R12\n+ [Q2, R22] = QR(A2, nb)\n+ R21 = matrix(0, rows = nrow(R22), cols = ncol(R11))\n+\n+ Q = cbind(Q1, Q2)\n+ R = rbind(cbind(R11, R12), cbind(R21, R22))\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/solve.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+source(\"scalable_linalg/triangular_inv.dml\") as inv\n+source(\"scalable_linalg/qr.dml\") as decomp\n+\n+Solve = function(Matrix[double] A, Matrix[double] b, int nb)\n+ return(Matrix[double] x) {\n+ # TODO: using thin QR may accumulate higher numerical erros.\n+ # Some modifications as suggested by Golub may be explored\n+\n+ [Q, R] = decomp::QR(A, nb)\n+\n+ Rinv = inv::U_triangular_inv(R)\n+\n+ x = Rinv %*% t(Q) %*% b\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/test/scalable_linalg",
"diff": "+../../scalable_linalg\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/test/test_Cholesky.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+source(\"scalable_linalg/cholesky.dml\") as decomp\n+\n+test_Cholesky = function() {\n+ print(\"Testing Choleskey Decomposition.\")\n+ n = 1000\n+ b = 100 # smallest block size\n+ eps = n*n*1e-12 # for lareger matrices eps should be larger\n+\n+ # create a symmetric random matrix\n+ A = rand(rows=n, cols=n, min=-1.0, max=1.0, pdf=\"uniform\")\n+ X = t(A) %*% A\n+\n+ L = decomp::Choleskey(X, b)\n+\n+ # check if X = LL^T. Infinity norm of (X - LL^T) must be close to zero\n+ diff = X - L %*% t(L)\n+ sup_norm = max(abs(diff))\n+ if (sup_norm > eps) {\n+ print(\"ERROR: Cholesky decomposition does not reproduce original matrix\")\n+ }\n+}\n+\n+tmp = test_Cholesky()\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/test/test_LU.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+source(\"scalable_linalg/lu.dml\") as decomp\n+\n+test_LU = function() {\n+ print(\"Testing LU Decomposition.\")\n+ n = 1000\n+ b = 100 # smallest block size\n+ eps = n*n*1e-12 # for lareger matrices eps should be larger\n+\n+ # create random square matrix\n+ A = rand(rows=n, cols=n, min=-1.0, max=1.0, pdf=\"uniform\")\n+\n+ [P, L, U] = decomp::LU(A, b)\n+\n+ # check if PA = LU. Infinity norm of (PA - LU) must be close to zero\n+ diff = P %*% A - L %*% U\n+ sup_norm = max(abs(diff))\n+ print(sup_norm)\n+ if (sup_norm > eps) {\n+ print(\"ERROR: LU decomposition does not reproduce original matrix\")\n+ }\n+}\n+\n+tmp = test_LU()\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/test/test_QR.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+source(\"scalable_linalg/qr.dml\") as decomp\n+\n+test_QR = function() {\n+ print(\"Testing QR Decomposition.\")\n+ n = 1000\n+ m = 2000\n+ b = 100 # smallest block size\n+ eps = n*m*1e-12 # for lareger matrices eps should be larger\n+\n+ # create a symmetric random matrix\n+ A = rand(rows=m, cols=n, min=-1.0, max=1.0, pdf=\"uniform\")\n+\n+ [Q, R] = decomp::QR(A, b)\n+\n+ # check if A = QR. Infinity norm of (A - QR) must be close to zero\n+ diff = A - Q %*% R\n+ sup_norm = max(abs(diff))\n+ if (sup_norm > eps) {\n+ print(\"ERROR: QR decomposition does not reproduce original matrix\")\n+ }\n+}\n+\n+tmp = test_QR()\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/test/test_all.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+source(\"test_inverse.dml\") as test1\n+source(\"test_LU.dml\") as test2\n+source(\"test_QR.dml\") as test3\n+source(\"test_Cholesky.dml\") as test4\n+\n+print(\"Testing Scalable Linalg\")\n+\n+tmp = test1::test_inverse()\n+tmp = test2::test_LU()\n+#tmp = test3::test_QR()\n+tmp = test4::test_Cholesky()\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/test/test_inverse.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+source(\"scalable_linalg/inverse.dml\") as inv\n+\n+test_inverse = function() {\n+ print(\"Testing inverse of square matrices\")\n+ n = 1000\n+ b = 100 # smallest block size\n+ eps = n*n*1e-12\n+\n+ A = rand(rows=n, cols=n, min=-1.0, max=1.0, pdf=\"uniform\")\n+\n+ Ainv = inv::Inverse(A, b)\n+\n+ # this product should produce identity matrix\n+ AAinv = A %*% Ainv\n+\n+ # create identity matrix\n+ ones = matrix(1.0, n, 1);\n+ I = diag(ones);\n+\n+ # check if AA^-1 = I. Infinity norm of (I - AA^-1) must be close to zero\n+ diff = I - AAinv\n+ sup_norm = max(abs(diff))\n+ if (sup_norm > eps) {\n+ print(\"ERROR: inverse of square matrix fails\")\n+ }\n+}\n+\n+tmp = test_inverse()\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/test/test_solve.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+source(\"scalable_linalg/solve.dml\") as lsq\n+\n+test_solve = function() {\n+ print(\"Testing linear Solve function.\")\n+ n = 1000\n+ m = 2000\n+ b = 100\n+ eps = 1e-9\n+\n+ # generate data for test\n+ A = rand(rows=m, cols=n, min=-2, max=2)\n+ x = rand(rows=n, cols=1, min=-5, max=5)\n+ y = A %*% x\n+ x1 = lsq::Solve(A, y, b)\n+\n+ diff = abs(x - x1)\n+ sup_norm = max(diff)\n+ if (sup_norm > eps) {\n+ print(\"ERROR: least squares solve fails\")\n+ }\n+}\n+\n+tmp = test_solve()\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/test/test_triangular_inv.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+source(\"scalable_linalg/triangular_inv.dml\") as inv\n+\n+test_inverse = function() {\n+ print(\"Testing inverse of triangular matrices\")\n+ n = 1000\n+ eps = n*n*1e-12\n+\n+ A = rand(rows=n, cols=n, min=-1.0, max=1.0, pdf=\"uniform\")\n+\n+ # create lower/upper triangular matrices using lu deomposition\n+ [P,L,U] = lu(A)\n+\n+ # calculate inverse\n+ Linv = inv::L_triangular_inv(L)\n+ Uinv = inv::U_triangular_inv(U)\n+\n+ # these products should produce identity matrices\n+ LLinv = L %*% Linv\n+ UUinv = U %*% Uinv\n+\n+ # create identity matrix\n+ ones = matrix(1.0, n, 1);\n+ I = diag(ones);\n+\n+ # check if LL^-1 = I. Infinity norm of (I - LL^-1) must be close to zero\n+ diff = I - LLinv\n+ sup_norm = max(abs(diff))\n+ if (sup_norm > eps) {\n+ print(\"ERROR: inverse of lower triangular matrix fails\")\n+ }\n+\n+ # check if UU^-1 = I. Inifinity norm of (I - UU^-1) must be close to zero\n+ diff = I - UUinv\n+ sup_norm = max(abs(diff))\n+\n+ if (sup_norm > eps) {\n+ print(\"ERROR: inverse of upper triangular matrix fails\")\n+ }\n+}\n+\n+tmp = test_inverse()\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/test/utils.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+rel_error = function(double true_val, double approx)\n+ return(double err) {\n+ err = abs(true_val - approx)/max(abs(true_val), 1e-9)\n+ }\n+\n+check_equal = function(double x1, double x2, double eps)\n+ return (boolean eq) {\n+ eq = TRUE\n+ diff = abs(x1 - x2)\n+ largest = max(abs(x2), abs(x2))\n+\n+ if (diff > largest*eps) {\n+ print(\"ERROR: vlaues not equal: \" + x1 + \" != \" + x2 )\n+ eq = FALSE\n+ }\n+ }\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/staging/scalable_linalg/triangular_inv.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+\n+# Inverse of lower triangular matrix\n+L_triangular_inv = function(Matrix[double] L)\n+ return(Matrix[double] A) {\n+ n = ncol(L)\n+\n+ if (n == 1) {\n+ A = 1/L[1,1]\n+ } else if (n == 2) {\n+ A = matrix(0, rows=2, cols=2)\n+ A[1,1] = L[2,2]\n+ A[2,2] = L[1,1]\n+ A[2,1] = -L[2,1]\n+ A = A/(as.scalar(L[1,1] * L[2,2]))\n+ } else {\n+ k = as.integer(floor(n/2))\n+\n+ L11 = L[1:k,1:k]\n+ L21 = L[k+1:n,1:k]\n+ L22 = L[k+1:n,k+1:n]\n+\n+ A11 = L_triangular_inv(L11)\n+ A22 = L_triangular_inv(L22)\n+ A12 = matrix(0, rows=nrow(A11), cols=ncol(A22))\n+ A21 = -A22 %*% L21 %*% A11\n+\n+ A = rbind(cbind(A11, A12), cbind(A21, A22))\n+ }\n+ }\n+\n+# Inverse of upper triangular matrix\n+U_triangular_inv = function(Matrix[double] U)\n+ return(Matrix[double] A) {\n+ n = ncol(U)\n+\n+ if (n == 1) {\n+ A = 1/U[1,1]\n+ } else if (n == 2) {\n+ A = matrix(0, rows=2, cols=2)\n+ A[1,1] = U[2,2]\n+ A[2,2] = U[1,1]\n+\n+ A[1,2] = -U[1,2]\n+ A = A/(as.scalar(U[1,1] * U[2,2]))\n+ } else {\n+ k = as.integer(floor(n/2))\n+\n+ U11 = U[1:k,1:k]\n+ U12 = U[1:k,k+1:n]\n+ U22 = U[k+1:n,k+1:n]\n+\n+ A11 = U_triangular_inv(U11)\n+ A22 = U_triangular_inv(U22)\n+ A12 = -A11 %*% U12 %*% A22\n+ A21 = matrix(0, rows=nrow(A22), cols=ncol(A11))\n+\n+ A = rbind(cbind(A11, A12), cbind(A21, A22))\n+ }\n+ }\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1213] Scalable linear algebra
DML implementations and accompanying DML tests for
Cholesky, LU, QR, Solve, Inverse and util functions.
Closes #368. |
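The block-recursive scripts in the commit above all lean on the same 2x2 block identity for a lower-triangular matrix,

$$\begin{bmatrix} L_{11} & 0 \\ L_{21} & L_{22} \end{bmatrix}^{-1} = \begin{bmatrix} L_{11}^{-1} & 0 \\ -\,L_{22}^{-1} L_{21} L_{11}^{-1} & L_{22}^{-1} \end{bmatrix},$$

which `L_triangular_inv` applies recursively. A self-contained plain-Java sketch of the same recursion on dense arrays — illustrative, with no singularity checks, and not part of the SystemML runtime:

```java
public class TriangularInverse {
    /** Inverse of a lower-triangular matrix via block recursion. */
    static double[][] invLower(double[][] L) {
        int n = L.length;
        if (n == 1) return new double[][]{{1.0 / L[0][0]}};
        int k = n / 2;
        double[][] A11 = invLower(slice(L, 0, k, 0, k));
        double[][] A22 = invLower(slice(L, k, n, k, n));
        // A21 = -A22 * L21 * A11, mirroring the DML script
        double[][] A21 = scale(mul(mul(A22, slice(L, k, n, 0, k)), A11), -1.0);
        double[][] A = new double[n][n];
        copy(A11, A, 0, 0); copy(A21, A, k, 0); copy(A22, A, k, k);
        return A;                       // upper-right block stays zero
    }
    static double[][] slice(double[][] M, int r0, int r1, int c0, int c1) {
        double[][] S = new double[r1 - r0][c1 - c0];
        for (int i = r0; i < r1; i++)
            for (int j = c0; j < c1; j++) S[i - r0][j - c0] = M[i][j];
        return S;
    }
    static double[][] mul(double[][] X, double[][] Y) {
        double[][] Z = new double[X.length][Y[0].length];
        for (int i = 0; i < X.length; i++)
            for (int p = 0; p < Y.length; p++)
                for (int j = 0; j < Y[0].length; j++)
                    Z[i][j] += X[i][p] * Y[p][j];
        return Z;
    }
    static double[][] scale(double[][] X, double a) {
        for (double[] row : X)
            for (int j = 0; j < row.length; j++) row[j] *= a;
        return X;
    }
    static void copy(double[][] S, double[][] D, int r, int c) {
        for (int i = 0; i < S.length; i++)
            for (int j = 0; j < S[0].length; j++) D[r + i][c + j] = S[i][j];
    }
}
```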
49,736 | 01.05.2017 15:08:39 | 28,800 | 8e5599dd9fa94a1b4467ea2866c7203aeac90d12 | [HOTFIX] Adding antlr runtime and wink jars | [
{
"change_type": "MODIFY",
"old_path": "pom.xml",
"new_path": "pom.xml",
"diff": "<artifactId>maven-jar-plugin</artifactId>\n<executions>\n<execution>\n+ <id>default-jar</id>\n<goals><goal>jar</goal></goals>\n<phase>package</phase>\n<configuration>\n<Main-Class>org.apache.sysml.api.DMLScript</Main-Class>\n</manifestEntries>\n</archive>\n- <classifier>lite</classifier>\n<excludes>\n<exclude>**/caffe/*</exclude>\n<exclude>**/org/tensorflow/*</exclude>\n<phase>package</phase>\n<configuration>\n<target name=\"copy and rename JAR\">\n- <move file=\"${project.build.directory}/${project.artifactId}-${project.version}-lite.jar\" tofile=\"${project.build.directory}/${project.artifactId}-${project.version}.jar\" />\n<copy file=\"${project.build.directory}/${project.artifactId}-${project.version}.jar\" tofile=\"${project.build.directory}/SystemML.jar\" />\n</target>\n</configuration>\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX] Adding antlr runtime and wink jars |
49,736 | 01.05.2017 15:55:44 | 28,800 | 1cc219527c88a85b256f3c2230e06b176b4fd679 | Allow Python mllearn estimators to force the usage of GPU
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContext.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContext.java",
"diff": "@@ -99,6 +99,11 @@ public class MLContext {\n*/\nprivate boolean gpu = false;\n+ /**\n+ * Whether or not GPU mode should be force\n+ */\n+ private boolean forceGPU = false;\n+\n/**\n* The number of heavy hitters that are printed as part of the statistics\n* option\n@@ -273,6 +278,7 @@ public class MLContext {\nscriptExecutor.setExplain(explain);\nscriptExecutor.setExplainLevel(explainLevel);\nscriptExecutor.setGPU(gpu);\n+ scriptExecutor.setForceGPU(forceGPU);\nscriptExecutor.setStatistics(statistics);\nscriptExecutor.setStatisticsMaxHeavyHitters(statisticsMaxHeavyHitters);\nscriptExecutor.setInit(scriptHistoryStrings.isEmpty());\n@@ -420,6 +426,16 @@ public class MLContext {\nthis.gpu = enable;\n}\n+ /**\n+ * Whether or not to explicitly \"force\" the usage of GPU.\n+ * If a GPU is not available, and the GPU mode is set or if available memory on GPU is less, SystemML will crash when the program is run.\n+ * @param enable\n+ * true if needs to be enabled, false otherwise\n+ */\n+ public void setForceGPU(boolean enable) {\n+ this.forceGPU = enable;\n+ }\n+\n/**\n* Whether or not the GPU mode is enabled.\n* @return true if enabled, false otherwise\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"diff": "@@ -118,6 +118,8 @@ public class ScriptExecutor {\nprotected boolean explain = false;\nprotected boolean gpu = false;\nprotected boolean oldGPU = false;\n+ protected boolean forceGPU = false;\n+ protected boolean oldForceGPU = false;\nprotected boolean statistics = false;\nprotected boolean oldStatistics = false;\nprotected ExplainLevel explainLevel;\n@@ -248,7 +250,9 @@ public class ScriptExecutor {\n}\noldGPU = DMLScript.USE_ACCELERATOR;\noldStatistics = DMLScript.STATISTICS;\n+ oldForceGPU = DMLScript.FORCE_ACCELERATOR;\nDMLScript.USE_ACCELERATOR = gpu;\n+ DMLScript.FORCE_ACCELERATOR = forceGPU;\nDMLScript.STATISTICS = statistics;\n}\n@@ -335,6 +339,7 @@ public class ScriptExecutor {\nprotected void cleanupAfterExecution() {\nrestoreInputsInSymbolTable();\nDMLScript.USE_ACCELERATOR = oldGPU;\n+ DMLScript.FORCE_ACCELERATOR = oldForceGPU;\nDMLScript.STATISTICS = oldStatistics;\n}\n@@ -653,4 +658,13 @@ public class ScriptExecutor {\nthis.gpu = enabled;\n}\n+ /**\n+ * Whether or not to force GPU usage\n+ * @param enabled\n+ * true if enabled, false otherwise\n+ */\n+ public void setForceGPU(boolean enabled) {\n+ this.forceGPU = enabled;\n+ }\n+\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemml/mlcontext.py",
"new_path": "src/main/python/systemml/mlcontext.py",
"diff": "@@ -362,6 +362,17 @@ class MLContext(object):\nself._ml.setGPU(bool(enable))\nreturn self\n+ def setForceGPU(self, enable):\n+ \"\"\"\n+ Whether or not to force the usage of GPU operators.\n+\n+ Parameters\n+ ----------\n+ enable: boolean\n+ \"\"\"\n+ self._ml.setForceGPU(bool(enable))\n+ return self\n+\ndef setStatisticsMaxHeavyHitters(self, maxHeavyHitters):\n\"\"\"\nThe maximum number of heavy hitters that are printed as part of the statistics.\n@@ -397,6 +408,18 @@ class MLContext(object):\nself._ml.setExplainLevel(explainLevel)\nreturn self\n+ def setConfigProperty(self, propertyName, propertyValue):\n+ \"\"\"\n+ Set configuration property, such as setConfigProperty(\"localtmpdir\", \"/tmp/systemml\").\n+\n+ Parameters\n+ ----------\n+ propertyName: String\n+ propertyValue: String\n+ \"\"\"\n+ self._ml.setConfigProperty(propertyName, propertyValue)\n+ return self\n+\ndef version(self):\n\"\"\"Display the project version.\"\"\"\nreturn self._ml.version()\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/python/systemml/mllearn/estimators.py",
"new_path": "src/main/python/systemml/mllearn/estimators.py",
"diff": "@@ -67,19 +67,71 @@ class BaseSystemMLEstimator(Estimator):\n\"\"\"\nself.label_col = colName\n- def setGPU(self, enableGPU):\n- self.estimator.setGPU(enableGPU)\n+ def setGPU(self, enable):\n+ \"\"\"\n+ Whether or not to enable GPU.\n+\n+ Parameters\n+ ----------\n+ enable: boolean\n+ \"\"\"\n+ self.estimator.setGPU(enable)\n+ return self\n+\n+ def setForceGPU(self, enable):\n+ \"\"\"\n+ Whether or not to force the usage of GPU operators.\n+\n+ Parameters\n+ ----------\n+ enable: boolean\n+ \"\"\"\n+ self.estimator.setForceGPU(enable)\nreturn self\ndef setExplain(self, explain):\n+ \"\"\"\n+ Explanation about the program. Mainly intended for developers.\n+\n+ Parameters\n+ ----------\n+ explain: boolean\n+ \"\"\"\nself.estimator.setExplain(explain)\nreturn self\n- def setStatistics(self, stat):\n- self.estimator.setStatistics(stat)\n+ def setStatistics(self, statistics):\n+ \"\"\"\n+ Whether or not to output statistics (such as execution time, elapsed time)\n+ about script executions.\n+\n+ Parameters\n+ ----------\n+ statistics: boolean\n+ \"\"\"\n+ self.estimator.setStatistics(statistics)\n+ return self\n+\n+ def setStatisticsMaxHeavyHitters(self, maxHeavyHitters):\n+ \"\"\"\n+ The maximum number of heavy hitters that are printed as part of the statistics.\n+\n+ Parameters\n+ ----------\n+ maxHeavyHitters: int\n+ \"\"\"\n+ self.estimator.setStatisticsMaxHeavyHitters(maxHeavyHitters)\nreturn self\ndef setConfigProperty(self, propertyName, propertyValue):\n+ \"\"\"\n+ Set configuration property, such as setConfigProperty(\"localtmpdir\", \"/tmp/systemml\").\n+\n+ Parameters\n+ ----------\n+ propertyName: String\n+ propertyValue: String\n+ \"\"\"\nself.estimator.setConfigProperty(propertyName, propertyValue)\nreturn self\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala",
"new_path": "src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala",
"diff": "@@ -69,18 +69,24 @@ trait HasRegParam extends Params {\ntrait BaseSystemMLEstimatorOrModel {\nvar enableGPU:Boolean = false\n+ var forceGPU:Boolean = false\nvar explain:Boolean = false\nvar statistics:Boolean = false\n+ var statisticsMaxHeavyHitters:Int = 10\nval config:HashMap[String, String] = new HashMap[String, String]()\ndef setGPU(enableGPU1:Boolean):BaseSystemMLEstimatorOrModel = { enableGPU = enableGPU1; this}\n+ def setForceGPU(enableGPU1:Boolean):BaseSystemMLEstimatorOrModel = { forceGPU = enableGPU1; this}\ndef setExplain(explain1:Boolean):BaseSystemMLEstimatorOrModel = { explain = explain1; this}\ndef setStatistics(statistics1:Boolean):BaseSystemMLEstimatorOrModel = { statistics = statistics1; this}\n+ def setStatisticsMaxHeavyHitters(statisticsMaxHeavyHitters1:Int):BaseSystemMLEstimatorOrModel = { statisticsMaxHeavyHitters = statisticsMaxHeavyHitters1; this}\ndef setConfigProperty(key:String, value:String):BaseSystemMLEstimatorOrModel = { config.put(key, value); this}\ndef updateML(ml:MLContext):Unit = {\nml.setGPU(enableGPU); ml.setExplain(explain); ml.setStatistics(statistics); config.map(x => ml.setConfigProperty(x._1, x._2))\n}\ndef copyProperties(other:BaseSystemMLEstimatorOrModel):BaseSystemMLEstimatorOrModel = {\n- other.setGPU(enableGPU); other.setExplain(explain); other.setStatistics(statistics); config.map(x => other.setConfigProperty(x._1, x._2))\n+ other.setGPU(enableGPU); other.setForceGPU(forceGPU);\n+ other.setExplain(explain); other.setStatistics(statistics); other.setStatisticsMaxHeavyHitters(statisticsMaxHeavyHitters);\n+ config.map(x => other.setConfigProperty(x._1, x._2))\nreturn other\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1552] Allow Python mllearn estimators to force the usage of GPU
Closes #480. |
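For context, a minimal Java sketch of driving the new GPU flags through the MLContext API that this commit extends. The DML script body and Spark session setup are illustrative assumptions; the `setGPU`/`setForceGPU` setters mirror the ones added in the diffs above.

```java
import org.apache.spark.SparkContext;
import org.apache.spark.sql.SparkSession;
import org.apache.sysml.api.mlcontext.MLContext;
import org.apache.sysml.api.mlcontext.Script;
import org.apache.sysml.api.mlcontext.ScriptFactory;

public class ForceGpuExample {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder().appName("ForceGpuExample").getOrCreate();
    SparkContext sc = spark.sparkContext();
    MLContext ml = new MLContext(sc);
    ml.setGPU(true);      // enable the GPU backend
    ml.setForceGPU(true); // force GPU operators even where the optimizer would fall back to CPU
    Script s = ScriptFactory.dml("X = rand(rows=1000, cols=1000); print(sum(X %*% X));");
    ml.execute(s);
  }
}
```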
49,717 | 01.05.2017 17:50:44 | 25,200 | 7989ab4f39802d0706618d495d06cb8126f98300 | [HOTFIX] changes setGPU and setForceGPU to do the right thing in mlctx | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"diff": "@@ -248,11 +248,7 @@ public class ScriptExecutor {\nif (symbolTable != null) {\nexecutionContext.setVariables(symbolTable);\n}\n- oldGPU = DMLScript.USE_ACCELERATOR;\noldStatistics = DMLScript.STATISTICS;\n- oldForceGPU = DMLScript.FORCE_ACCELERATOR;\n- DMLScript.USE_ACCELERATOR = gpu;\n- DMLScript.FORCE_ACCELERATOR = forceGPU;\nDMLScript.STATISTICS = statistics;\n}\n@@ -656,6 +652,8 @@ public class ScriptExecutor {\n*/\npublic void setGPU(boolean enabled) {\nthis.gpu = enabled;\n+ oldGPU = DMLScript.USE_ACCELERATOR;\n+ DMLScript.USE_ACCELERATOR = gpu;\n}\n/**\n@@ -665,6 +663,8 @@ public class ScriptExecutor {\n*/\npublic void setForceGPU(boolean enabled) {\nthis.forceGPU = enabled;\n+ oldForceGPU = DMLScript.FORCE_ACCELERATOR;\n+ DMLScript.FORCE_ACCELERATOR = forceGPU;\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX] changes setGPU and setForceGPU to do the right thing in mlctx |
49,736 | 02.05.2017 10:25:01 | 28,800 | 8324b69f11fb71890e0b592603e759c68f4db87f | [HOTFIX] Allows multiple MLContext to set the configuration property
Also, added bugfix in mllearn to enable force GPU option. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/ScriptExecutor.java",
"diff": "@@ -248,8 +248,30 @@ public class ScriptExecutor {\nif (symbolTable != null) {\nexecutionContext.setVariables(symbolTable);\n}\n+\n+ }\n+\n+ /**\n+ * Set the global flags (for example: statistics, gpu, etc).\n+ */\n+ protected void setGlobalFlags() {\noldStatistics = DMLScript.STATISTICS;\nDMLScript.STATISTICS = statistics;\n+ oldForceGPU = DMLScript.FORCE_ACCELERATOR;\n+ DMLScript.FORCE_ACCELERATOR = forceGPU;\n+ oldGPU = DMLScript.USE_ACCELERATOR;\n+ DMLScript.USE_ACCELERATOR = gpu;\n+ DMLScript.STATISTICS_COUNT = statisticsMaxHeavyHitters;\n+ }\n+\n+ /**\n+ * Reset the global flags (for example: statistics, gpu, etc) post-execution.\n+ */\n+ protected void resetGlobalFlags() {\n+ DMLScript.STATISTICS = oldStatistics;\n+ DMLScript.FORCE_ACCELERATOR = oldForceGPU;\n+ DMLScript.USE_ACCELERATOR = oldGPU;\n+ DMLScript.STATISTICS_COUNT = 10;\n}\n/**\n@@ -327,6 +349,7 @@ public class ScriptExecutor {\nscript.setScriptExecutor(this);\n// Set global variable indicating the script type\nDMLScript.SCRIPT_TYPE = script.getScriptType();\n+ setGlobalFlags();\n}\n/**\n@@ -334,9 +357,7 @@ public class ScriptExecutor {\n*/\nprotected void cleanupAfterExecution() {\nrestoreInputsInSymbolTable();\n- DMLScript.USE_ACCELERATOR = oldGPU;\n- DMLScript.FORCE_ACCELERATOR = oldForceGPU;\n- DMLScript.STATISTICS = oldStatistics;\n+ resetGlobalFlags();\n}\n/**\n@@ -652,8 +673,6 @@ public class ScriptExecutor {\n*/\npublic void setGPU(boolean enabled) {\nthis.gpu = enabled;\n- oldGPU = DMLScript.USE_ACCELERATOR;\n- DMLScript.USE_ACCELERATOR = gpu;\n}\n/**\n@@ -663,8 +682,6 @@ public class ScriptExecutor {\n*/\npublic void setForceGPU(boolean enabled) {\nthis.forceGPU = enabled;\n- oldForceGPU = DMLScript.FORCE_ACCELERATOR;\n- DMLScript.FORCE_ACCELERATOR = forceGPU;\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala",
"new_path": "src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala",
"diff": "@@ -81,7 +81,9 @@ trait BaseSystemMLEstimatorOrModel {\ndef setStatisticsMaxHeavyHitters(statisticsMaxHeavyHitters1:Int):BaseSystemMLEstimatorOrModel = { statisticsMaxHeavyHitters = statisticsMaxHeavyHitters1; this}\ndef setConfigProperty(key:String, value:String):BaseSystemMLEstimatorOrModel = { config.put(key, value); this}\ndef updateML(ml:MLContext):Unit = {\n- ml.setGPU(enableGPU); ml.setExplain(explain); ml.setStatistics(statistics); config.map(x => ml.setConfigProperty(x._1, x._2))\n+ ml.setGPU(enableGPU); ml.setForceGPU(forceGPU);\n+ ml.setExplain(explain); ml.setStatistics(statistics); ml.setStatisticsMaxHeavyHitters(statisticsMaxHeavyHitters);\n+ config.map(x => ml.setConfigProperty(x._1, x._2))\n}\ndef copyProperties(other:BaseSystemMLEstimatorOrModel):BaseSystemMLEstimatorOrModel = {\nother.setGPU(enableGPU); other.setForceGPU(forceGPU);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX] Allows multiple MLContext to set the configuration property
- Also, added bugfix in mllearn to enable force GPU option. |
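The `setGlobalFlags()`/`resetGlobalFlags()` pair above exists so that per-execution settings do not leak across scripts or MLContext instances. A standalone sketch of that save-override-restore pattern follows; the try/finally is an assumption of this sketch (the commit itself restores the flags from `cleanupAfterExecution()`), and `STATISTICS` stands in for `DMLScript.STATISTICS`.

```java
public class GlobalFlagGuard {
  static boolean STATISTICS = false; // stand-in for the global DMLScript.STATISTICS flag

  static void runWithStatistics(boolean statistics, Runnable body) {
    boolean oldStatistics = STATISTICS; // save the caller's value
    STATISTICS = statistics;            // apply the per-execution setting
    try {
      body.run();
    } finally {
      STATISTICS = oldStatistics;       // restore, even if the body fails
    }
  }

  public static void main(String[] args) {
    runWithStatistics(true, () -> System.out.println("during run: STATISTICS=" + STATISTICS));
    System.out.println("restored: STATISTICS=" + STATISTICS);
  }
}
```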
49,736 | 02.05.2017 15:48:49 | 28,800 | 47c2dd10f43e3f348c4372a8a7b34fa04705930a | [MINOR] Updated documentation and improved log messages
Also, BLAS is disabled by default. We can enable it after more rigorous
testing. | [
{
"change_type": "MODIFY",
"old_path": "conf/SystemML-config.xml.template",
"new_path": "conf/SystemML-config.xml.template",
"diff": "<codegen.literals>1</codegen.literals>\n<!-- enables native blas for matrix multiplication and convolution, experimental feature -->\n- <native.blas>true</native.blas>\n+ <native.blas>false</native.blas>\n<!-- prints extra statistics information for GPU -->\n<systemml.stats.extraGPU>false</systemml.stats.extraGPU>\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/beginners-guide-caffe2dml.md",
"new_path": "docs/beginners-guide-caffe2dml.md",
"diff": "@@ -31,6 +31,40 @@ limitations under the License.\nCaffe2DML is an experimental API that converts an Caffe specification to DML.\n+## Example: Train Lenet\n+\n+1. Install `mlextend` package to get MNIST data: `pip install mlxtend`.\n+2. (Optional but recommended) Follow the steps mentioned in [the user guide]([the user guide of native backend](http://apache.github.io/incubator-systemml/native-backend)) and install Intel MKL.\n+3. Install [SystemML](http://apache.github.io/incubator-systemml/beginners-guide-python#install-systemml).\n+4. Invoke PySpark shell: `pyspark --conf spark.executorEnv.LD_LIBRARY_PATH=/path/to/blas-n-other-dependencies`.\n+\n+```bash\n+# Download the MNIST dataset\n+from mlxtend.data import mnist_data\n+import numpy as np\n+from sklearn.utils import shuffle\n+X, y = mnist_data()\n+X, y = shuffle(X, y)\n+\n+# Split the data into training and test\n+n_samples = len(X)\n+X_train = X[:int(.9 * n_samples)]\n+y_train = y[:int(.9 * n_samples)]\n+X_test = X[int(.9 * n_samples):]\n+y_test = y[int(.9 * n_samples):]\n+\n+# Download the Lenet network\n+import urllib\n+urllib.urlretrieve('https://raw.githubusercontent.com/niketanpansare/model_zoo/master/caffe/vision/lenet/mnist/lenet.proto', 'lenet.proto')\n+urllib.urlretrieve('https://raw.githubusercontent.com/niketanpansare/model_zoo/master/caffe/vision/lenet/mnist/lenet_solver.proto', 'lenet_solver.proto')\n+\n+# Train Lenet On MNIST using scikit-learn like API\n+from systemml.mllearn import Caffe2DML\n+lenet = Caffe2DML(sqlCtx, solver='lenet_solver.proto', input_shape=(1, 28, 28)).set(debug=True).setStatistics(True)\n+lenet.fit(X_train, y_train)\n+y_predicted = lenet.predict(X_test)\n+```\n+\n## Frequently asked questions\n- How to set batch size ?\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/index.md",
"new_path": "docs/index.md",
"diff": "@@ -57,6 +57,7 @@ machine in R-like and Python-like declarative languages.\nin Standalone Mode.\n* [JMLC](jmlc) - Java Machine Learning Connector.\n* See [Java Machine Learning Connector (JMLC)](jmlc) for more information.\n+* *Experimental* [Caffe2DML API](http://apache.github.io/incubator-systemml/beginners-guide-caffe2dml) for Deep Learning.\n## Language Guides\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/native-backend.md",
"new_path": "docs/native-backend.md",
"diff": "+---\n+layout: global\n+title: Using SystemML with Native BLAS support\n+description: Using SystemML with Native BLAS support\n+---\n<!--\n{% comment %}\nLicensed to the Apache Software Foundation (ASF) under one or more\n@@ -17,6 +22,11 @@ limitations under the License.\n{% endcomment %}\n-->\n+* This will become a table of contents (this text will be scraped).\n+{:toc}\n+\n+<br/>\n+\n# User Guide\nBy default, SystemML implements all its matrix operations in Java.\n@@ -25,16 +35,16 @@ This simplifies deployment especially in a distributed environment.\nIn some cases (such as deep learning), the user might want to use native BLAS\nrather than SystemML's internal Java library for performing single-node\noperations such matrix multiplication, convolution, etc.\n+\n+To allow SystemML to use native BLAS rather than internal Java library,\n+please set the configuration property `native.blas` to `true`.\n+\nBy default, SystemML will first attempt to use Intel MKL (if installed)\nand then OpenBLAS (if installed).\nIf both Intel MKL and OpenBLAS are not available, SystemML\nfalls back to its internal Java library.\n-To force SystemML to use internal Java library rather than native BLAS,\n-please set the configuration property `native.blas` to `false`.\n-\n-The current version of SystemML only supports BLAS on Linux machines.\n-\n+The current version of SystemML only supports BLAS on **Linux** machines.\n## Step 1: Install BLAS\n@@ -95,19 +105,20 @@ sudo ln -s /lib64/libgomp.so.1 /lib64/libgomp.so\n## Step 3: Provide the location of the native libraries\n-1. Add the location of the native libraries (i.e. BLAS and other dependencies)\n+1. Pass the location of the native libraries using command-line options:\n+\n+- [Spark](http://spark.apache.org/docs/latest/configuration.html): `--conf spark.executorEnv.LD_LIBRARY_PATH=/path/to/blas-n-other-dependencies`\n+- Java: `-Djava.library.path=/path/to/blas-n-other-dependencies`\n+\n+2. Alternatively, you can add the location of the native libraries (i.e. BLAS and other dependencies)\nto the environment variable `LD_LIBRARY_PATH` (on Linux).\nIf you want to use SystemML with Spark, please add the following line to `spark-env.sh`\n+(or to the bash profile).\n```bash\nexport LD_LIBRARY_PATH=/path/to/blas-n-other-dependencies\n- # Or export SPARK_LIBRARY_PATH=/path/to/blas-n-other-dependencies\n```\n-2. Alternatively, you can pass the location of the native libraries using command-line options:\n-\n-- Java: `-Djava.library.path=/path/to/blas-n-other-dependencies`\n-- [Spark](http://spark.apache.org/docs/latest/configuration.html): `--driver-library-path`\n## Common issues on Linux\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"diff": "@@ -116,7 +116,7 @@ public class DMLConfig\n_defaultVals.put(CODEGEN, \"false\" );\n_defaultVals.put(CODEGEN_PLANCACHE, \"true\" );\n_defaultVals.put(CODEGEN_LITERALS, \"1\" );\n- _defaultVals.put(NATIVE_BLAS, \"true\" );\n+ _defaultVals.put(NATIVE_BLAS, \"false\" );\n_defaultVals.put(EXTRA_GPU_STATS, \"false\" );\n_defaultVals.put(EXTRA_DNN_STATS, \"false\" );\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java",
"new_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java",
"diff": "@@ -25,6 +25,7 @@ import org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport java.util.HashMap;\n+import java.util.Vector;\nimport java.io.InputStream;\nimport java.io.OutputStream;\nimport java.io.File;\n@@ -94,24 +95,45 @@ public class NativeHelper {\nif(userSpecifiedBLAS.equalsIgnoreCase(\"\")) {\nblasType = isMKLAvailable() ? \"mkl\" : isOpenBLASAvailable() ? \"openblas\" : null;\nif(blasType == null)\n- LOG.warn(\"Unable to load either MKL or OpenBLAS\");\n+ LOG.info(\"Unable to load either MKL or OpenBLAS. Please set \");\n}\nelse if(userSpecifiedBLAS.equalsIgnoreCase(\"mkl\")) {\nblasType = isMKLAvailable() ? \"mkl\" : null;\nif(blasType == null)\n- LOG.warn(\"Unable to load MKL\");\n+ LOG.info(\"Unable to load MKL\");\n}\nelse if(userSpecifiedBLAS.equalsIgnoreCase(\"openblas\")) {\nblasType = isOpenBLASAvailable() ? \"openblas\" : null;\nif(blasType == null)\n- LOG.warn(\"Unable to load OpenBLAS\");\n+ LOG.info(\"Unable to load OpenBLAS\");\n}\nelse {\n- LOG.warn(\"Unsupported BLAS:\" + userSpecifiedBLAS);\n+ LOG.info(\"Unsupported BLAS:\" + userSpecifiedBLAS);\n}\n// =============================================================================\nif(blasType != null && loadLibraryHelper(\"libsystemml_\" + blasType + \"-Linux-x86_64.so\")) {\n- LOG.info(\"Using native blas: \" + blasType);\n+ String blasPathAndHint = \"\";\n+ // ------------------------------------------------------------\n+ // This logic gets the list of native libraries that are loaded\n+ try {\n+ java.lang.reflect.Field loadedLibraryNamesField = ClassLoader.class.getDeclaredField(\"loadedLibraryNames\");\n+ loadedLibraryNamesField.setAccessible(true);\n+ @SuppressWarnings(\"unchecked\")\n+ Vector<String> libraries = (Vector<String>) loadedLibraryNamesField.get(ClassLoader.getSystemClassLoader());\n+ LOG.debug(\"List of native libraries loaded:\" + libraries);\n+ for(String library : libraries) {\n+ if(library.endsWith(\"libmkl_rt.so\"))\n+ blasPathAndHint = \" from the path \" + library;\n+ else if(library.endsWith(\"libopenblas.so\")) {\n+ blasPathAndHint = \" from the path \" + library + \". 
Hint: Please make sure that the libopenblas.so is built with GNU OpenMP threading (ldd \" + library + \" | grep libgomp).\";\n+ }\n+ }\n+ } catch (NoSuchFieldException | SecurityException | IllegalArgumentException | IllegalAccessException e) {\n+ LOG.debug(\"Error while finding list of native libraries:\" + e.getMessage());\n+ }\n+ // ------------------------------------------------------------\n+\n+ LOG.info(\"Using native blas: \" + blasType + blasPathAndHint);\nisSystemMLLoaded = true;\n}\n}\n@@ -155,7 +177,7 @@ public class NativeHelper {\n// ------------------------------------------------------------\n// Set environment variable MKL_THREADING_LAYER to GNU on Linux for performance\nif(!loadLibraryHelper(\"libpreload_systemml-Linux-x86_64.so\")) {\n- LOG.warn(\"Unable to load preload_systemml (required for loading MKL-enabled SystemML library)\");\n+ LOG.debug(\"Unable to load preload_systemml (required for loading MKL-enabled SystemML library)\");\nreturn false;\n}\n// The most reliable way in my investigation to ensure that MKL runs smoothly with OpenMP (used by conv2d*)\n@@ -181,9 +203,9 @@ public class NativeHelper {\n}\ncatch (UnsatisfiedLinkError e) {\nif(optionalMsg != null)\n- LOG.warn(\"Unable to load \" + blas + \"(\" + optionalMsg + \"):\" + e.getMessage());\n+ LOG.debug(\"Unable to load \" + blas + \"(\" + optionalMsg + \"):\" + e.getMessage());\nelse\n- LOG.warn(\"Unable to load \" + blas + \":\" + e.getMessage());\n+ LOG.debug(\"Unable to load \" + blas + \":\" + e.getMessage());\nreturn false;\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala",
"new_path": "src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala",
"diff": "@@ -54,12 +54,9 @@ import org.apache.sysml.runtime.controlprogram.parfor.stat.InfrastructureAnalyze\nobject Caffe2DML {\nval LOG = LogFactory.getLog(classOf[Caffe2DML].getName())\n- def fileSep():String = { if(File.separator.equals(\"\\\\\")) \"\\\\\\\\\" else File.separator }\n- def setNNLibraryPath(path:String):Unit = { prefix = path + fileSep + \"nn\"}\n// ------------------------------------------------------------------------\n- var prefix = Utils.getPrefix()\n- def layerDir = prefix + fileSep + \"layers\" + fileSep\n- def optimDir = prefix + fileSep + \"optim\" + fileSep\n+ def layerDir = \"nn/layers/\"\n+ def optimDir = \"nn/optim/\"\n// Naming conventions:\nval X = \"X\"; val y = \"y\"; val batchSize = \"BATCH_SIZE\"; val numImages = \"num_images\"; val numValidationImages = \"num_validation\"\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/scala/org/apache/sysml/api/dl/CaffeNetwork.scala",
"new_path": "src/main/scala/org/apache/sysml/api/dl/CaffeNetwork.scala",
"diff": "@@ -136,14 +136,14 @@ class CaffeNetwork(netFilePath:String, val currentPhase:Phase,\n// The bottom layers are the layers available in the getBottomList (from Caffe .proto files)\nprivate val _bottomLayers:Map[String, Set[String]] = convertTupleListToMap(\n_caffeLayerParams.flatMap(l => expandBottomList(l.getName, l.getBottomList)))\n- CaffeNetwork.LOG.info(\"Bottom layers:\" + _bottomLayers)\n+ CaffeNetwork.LOG.debug(\"Bottom layers:\" + _bottomLayers)\n// Find the top layers by reversing the bottom list\nprivate val _topLayers:Map[String, Set[String]] = convertTupleListToMap(flipKeyValues(_bottomLayers.toList))\n- CaffeNetwork.LOG.info(\"Top layers:\" + _topLayers)\n+ CaffeNetwork.LOG.debug(\"Top layers:\" + _topLayers)\nprivate val _layers: Map[String, CaffeLayer] = _caffeLayerParams.map(l => l.getName -> convertLayerParameterToCaffeLayer(l)).toMap\n- CaffeNetwork.LOG.info(\"Layers:\" + _layers)\n+ CaffeNetwork.LOG.debug(\"Layers:\" + _layers)\nprivate val _layerIDs: Map[String, Int] = _layers.entrySet().map(x => x.getKey -> x.getValue.id).toMap\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/scala/org/apache/sysml/api/dl/Utils.scala",
"new_path": "src/main/scala/org/apache/sysml/api/dl/Utils.scala",
"diff": "@@ -77,18 +77,6 @@ object Utils {\n}\n- def getPrefix():String = {\n- val f = new File(\"nn\")\n- if(f.exists() && f.isDirectory()) {\n- Caffe2DML.LOG.info(\"Since nn directory exists in current folder, using it.\")\n- return \"nn\"\n- }\n- else {\n- // TODO: Extract from the jar\n- throw new RuntimeException(\"In current version, we require that you download the nn folder into current directory from https://github.com/apache/incubator-systemml/tree/master/scripts/staging/SystemML-NN\")\n- }\n- }\n-\n// --------------------------------------------------------------\n// Caffe utility functions\ndef readCaffeNet(netFilePath:String):NetParameter = {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Updated documentation and improved log messages
- Also, BLAS is disabled by default. We can enable it after more rigorous
testing. |
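The reflective library listing this commit adds to NativeHelper can be reproduced in isolation. A sketch follows, with the caveat that `loadedLibraryNames` is a JDK-internal field (present on the classic `ClassLoader` up to Java 8; newer JDKs may reject the reflective access):

```java
import java.lang.reflect.Field;
import java.util.Vector;

public class LoadedNativeLibs {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws Exception {
    // Same trick as in NativeHelper above: read the JVM's internal list of
    // already-loaded native libraries to report where BLAS was picked up from.
    Field f = ClassLoader.class.getDeclaredField("loadedLibraryNames");
    f.setAccessible(true);
    Vector<String> libs = (Vector<String>) f.get(ClassLoader.getSystemClassLoader());
    for (String lib : libs)
      System.out.println("loaded native library: " + lib);
  }
}
```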
49,736 | 02.05.2017 17:32:53 | 25,200 | aa6d38c94b632d7fb09869a78887df8485ddeaf3 | [MINOR] Show native library paths only when log4j is set debug or lower
level | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java",
"new_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java",
"diff": "@@ -115,6 +115,8 @@ public class NativeHelper {\nString blasPathAndHint = \"\";\n// ------------------------------------------------------------\n// This logic gets the list of native libraries that are loaded\n+ if(LOG.isDebugEnabled()) {\n+ // Only perform the checking of library paths when DEBUG is enabled to avoid runtime overhead.\ntry {\njava.lang.reflect.Field loadedLibraryNamesField = ClassLoader.class.getDeclaredField(\"loadedLibraryNames\");\nloadedLibraryNamesField.setAccessible(true);\n@@ -122,15 +124,15 @@ public class NativeHelper {\nVector<String> libraries = (Vector<String>) loadedLibraryNamesField.get(ClassLoader.getSystemClassLoader());\nLOG.debug(\"List of native libraries loaded:\" + libraries);\nfor(String library : libraries) {\n- if(library.endsWith(\"libmkl_rt.so\"))\n+ if(library.contains(\"libmkl_rt\") || library.contains(\"libopenblas\")) {\nblasPathAndHint = \" from the path \" + library;\n- else if(library.endsWith(\"libopenblas.so\")) {\n- blasPathAndHint = \" from the path \" + library + \". Hint: Please make sure that the libopenblas.so is built with GNU OpenMP threading (ldd \" + library + \" | grep libgomp).\";\n+ break;\n}\n}\n} catch (NoSuchFieldException | SecurityException | IllegalArgumentException | IllegalAccessException e) {\nLOG.debug(\"Error while finding list of native libraries:\" + e.getMessage());\n}\n+ }\n// ------------------------------------------------------------\nLOG.info(\"Using native blas: \" + blasType + blasPathAndHint);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Show native library paths only when log4j is set debug or lower
level |
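The pattern behind this change, sketched standalone: only pay for expensive diagnostics when the logger is actually at DEBUG. `expensiveDiagnostics()` is a hypothetical stand-in for the reflective library scan guarded in the diff above.

```java
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class DebugGuard {
  private static final Log LOG = LogFactory.getLog(DebugGuard.class);

  static String expensiveDiagnostics() {
    return "..."; // placeholder for work that should not run at INFO level or above
  }

  public static void main(String[] args) {
    // The guard avoids both the diagnostic work and the string concatenation
    // unless DEBUG logging is actually enabled.
    if (LOG.isDebugEnabled())
      LOG.debug("diagnostics: " + expensiveDiagnostics());
  }
}
```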
49,736 | 03.05.2017 15:15:18 | 25,200 | 29c307c9ae5f416d2caac7e6e21aeb8d375dd8e5 | Remove the usage of SYSTEMML_BLAS environment variable from NativeHelper | [
{
"change_type": "MODIFY",
"old_path": "conf/SystemML-config.xml.template",
"new_path": "conf/SystemML-config.xml.template",
"diff": "<!-- if codegen.enabled, compile literals as constants: 1..heuristic, 2..always -->\n<codegen.literals>1</codegen.literals>\n- <!-- enables native blas for matrix multiplication and convolution, experimental feature -->\n- <native.blas>false</native.blas>\n+ <!-- enables native blas for matrix multiplication and convolution, experimental feature (options: auto, mkl, openblas, none) -->\n+ <native.blas>auto</native.blas>\n<!-- prints extra statistics information for GPU -->\n<systemml.stats.extraGPU>false</systemml.stats.extraGPU>\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/native-backend.md",
"new_path": "docs/native-backend.md",
"diff": "@@ -37,7 +37,9 @@ rather than SystemML's internal Java library for performing single-node\noperations such matrix multiplication, convolution, etc.\nTo allow SystemML to use native BLAS rather than internal Java library,\n-please set the configuration property `native.blas` to `true`.\n+please set the configuration property `native.blas` to `auto`.\n+Other possible options are: `mkl`, `openblas` and `none`.\n+The first two options will only attempt to use the respective BLAS libraries.\nBy default, SystemML will first attempt to use Intel MKL (if installed)\nand then OpenBLAS (if installed).\n@@ -153,10 +155,6 @@ Make sure that this path is accessible to Java as per instructions provided in t\nThis section describes how to compile shared libraries in the folder `src/main/cpp/lib`.\nThis is required when the developer makes changes to cpp directory or while validating the source package during the release process.\n-To force SystemML to use OpenBLAS instead of Intel MKL if both are installed,\n-please set the environment variable `SYSTEMML_BLAS` to `openblas`.\n-This environment variable is used internally for testing and is not required for users.\n-\n## Intro to CMake\nIf you are familiar with cmake, skip this section.\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"new_path": "src/main/java/org/apache/sysml/conf/DMLConfig.java",
"diff": "@@ -116,7 +116,7 @@ public class DMLConfig\n_defaultVals.put(CODEGEN, \"false\" );\n_defaultVals.put(CODEGEN_PLANCACHE, \"true\" );\n_defaultVals.put(CODEGEN_LITERALS, \"1\" );\n- _defaultVals.put(NATIVE_BLAS, \"false\" );\n+ _defaultVals.put(NATIVE_BLAS, \"auto\" );\n_defaultVals.put(EXTRA_GPU_STATS, \"false\" );\n_defaultVals.put(EXTRA_DNN_STATS, \"false\" );\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/AggregateBinaryCPInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/AggregateBinaryCPInstruction.java",
"diff": "package org.apache.sysml.runtime.instructions.cp;\n-import org.apache.sysml.conf.ConfigurationManager;\n-import org.apache.sysml.conf.DMLConfig;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n@@ -33,6 +31,7 @@ import org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.operators.AggregateBinaryOperator;\nimport org.apache.sysml.runtime.matrix.operators.AggregateOperator;\nimport org.apache.sysml.runtime.matrix.operators.Operator;\n+import org.apache.sysml.utils.NativeHelper;\npublic class AggregateBinaryCPInstruction extends BinaryCPInstruction\n{\n@@ -81,8 +80,7 @@ public class AggregateBinaryCPInstruction extends BinaryCPInstruction\nif( matBlock2 instanceof CompressedMatrixBlock )\nsoresBlock = (MatrixBlock) (matBlock2.aggregateBinaryOperations(matBlock1, matBlock2, new MatrixBlock(), ab_op));\nelse {\n- boolean enableNative = ConfigurationManager.getDMLConfig().getBooleanValue(DMLConfig.NATIVE_BLAS);\n- soresBlock = (MatrixBlock) (matBlock1.aggregateBinaryOperations(matBlock1, matBlock2, new MatrixBlock(), ab_op, enableNative));\n+ soresBlock = (MatrixBlock) (matBlock1.aggregateBinaryOperations(matBlock1, matBlock2, new MatrixBlock(), ab_op, NativeHelper.isNativeLibraryLoaded()));\n}\n//release inputs/outputs\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java",
"diff": "@@ -22,8 +22,6 @@ package org.apache.sysml.runtime.instructions.cp;\nimport java.util.ArrayList;\nimport java.util.Arrays;\n-import org.apache.sysml.conf.ConfigurationManager;\n-import org.apache.sysml.conf.DMLConfig;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.controlprogram.context.ExecutionContext;\nimport org.apache.sysml.runtime.functionobjects.SwapIndex;\n@@ -342,7 +340,7 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction\nint Q = (int) ConvolutionUtils.getQ(W, S, stride_w, pad_w);\nConvolutionParameters params = new ConvolutionParameters(N, C, H, W, K, R, S, stride_h, stride_w, pad_h, pad_w, _numThreads);\n- params.enableNative = ConfigurationManager.getDMLConfig().getBooleanValue(DMLConfig.NATIVE_BLAS) && NativeHelper.isNativeLibraryLoaded();\n+ params.enableNative = NativeHelper.isNativeLibraryLoaded();\nif (instOpcode.equalsIgnoreCase(\"maxpooling\") || instOpcode.equalsIgnoreCase(\"relu_maxpooling\")) {\nif(matBlock.isEmptyBlock()) {\noutputBlock = new MatrixBlock(N, C*P*Q, true);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/ConvolutionSPInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/spark/ConvolutionSPInstruction.java",
"diff": "@@ -25,8 +25,6 @@ import java.util.Iterator;\nimport org.apache.spark.api.java.JavaPairRDD;\nimport org.apache.spark.api.java.function.PairFlatMapFunction;\nimport org.apache.spark.broadcast.Broadcast;\n-import org.apache.sysml.conf.ConfigurationManager;\n-import org.apache.sysml.conf.DMLConfig;\nimport org.apache.sysml.parser.Expression.DataType;\nimport org.apache.sysml.parser.Expression.ValueType;\nimport org.apache.sysml.runtime.DMLRuntimeException;\n@@ -49,6 +47,7 @@ import org.apache.sysml.runtime.matrix.data.MatrixIndexes;\nimport org.apache.sysml.runtime.matrix.data.OutputInfo;\nimport org.apache.sysml.runtime.matrix.operators.ReorgOperator;\nimport org.apache.sysml.runtime.util.ConvolutionUtils;\n+import org.apache.sysml.utils.NativeHelper;\nimport scala.Tuple2;\n@@ -284,7 +283,7 @@ public class ConvolutionSPInstruction extends UnarySPInstruction {\nint Q = (int) ConvolutionUtils.getQ(W, S, stride_w, pad_w);\nConvolutionParameters params = new ConvolutionParameters(numRowsPerBlock, C, H, W, K, R, S, stride_h, stride_w, pad_h, pad_w, 1);\n- boolean enableNativeBLAS = ConfigurationManager.getDMLConfig().getBooleanValue(DMLConfig.NATIVE_BLAS);\n+ boolean enableNativeBLAS = NativeHelper.isNativeLibraryLoaded();\nJavaPairRDD<MatrixIndexes,MatrixBlock> out = inputRDD.mapPartitionsToPair(new RDDConv2dMapMMFunction(filterBroadcast, params, instOpcode, biasBroadcast, mcRdd.getRows(), enableNativeBLAS), true);\n//put output RDD handle into symbol table\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java",
"diff": "@@ -35,7 +35,6 @@ import java.util.stream.LongStream;\nimport org.apache.commons.math3.random.Well1024a;\nimport org.apache.hadoop.io.DataInputBuffer;\nimport org.apache.sysml.conf.ConfigurationManager;\n-import org.apache.sysml.conf.DMLConfig;\nimport org.apache.sysml.hops.Hop.OpOp2;\nimport org.apache.sysml.hops.OptimizerUtils;\nimport org.apache.sysml.lops.MMTSJ.MMTSJType;\n@@ -86,6 +85,7 @@ import org.apache.sysml.runtime.util.FastBufferedDataInputStream;\nimport org.apache.sysml.runtime.util.FastBufferedDataOutputStream;\nimport org.apache.sysml.runtime.util.IndexRange;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n+import org.apache.sysml.utils.NativeHelper;\n@@ -4878,8 +4878,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\npublic MatrixValue aggregateBinaryOperations(MatrixIndexes m1Index, MatrixValue m1Value, MatrixIndexes m2Index, MatrixValue m2Value,\nMatrixValue result, AggregateBinaryOperator op ) throws DMLRuntimeException\n{\n- boolean enableNativeBLAS = ConfigurationManager.getDMLConfig().getBooleanValue(DMLConfig.NATIVE_BLAS);\n- return aggregateBinaryOperations(m1Value, m2Value, result, op, enableNativeBLAS);\n+ return aggregateBinaryOperations(m1Value, m2Value, result, op, NativeHelper.isNativeLibraryLoaded());\n}\npublic MatrixValue aggregateBinaryOperations(MatrixIndexes m1Index, MatrixValue m1Value, MatrixIndexes m2Index, MatrixValue m2Value,\n@@ -4889,8 +4888,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab\n}\npublic MatrixValue aggregateBinaryOperations(MatrixValue m1Value, MatrixValue m2Value, MatrixValue result, AggregateBinaryOperator op) throws DMLRuntimeException {\n- boolean enableNativeBLAS = ConfigurationManager.getDMLConfig().getBooleanValue(DMLConfig.NATIVE_BLAS);\n- return aggregateBinaryOperations(m1Value, m2Value, result, op, enableNativeBLAS);\n+ return aggregateBinaryOperations(m1Value, m2Value, result, op, NativeHelper.isNativeLibraryLoaded());\n}\npublic MatrixValue aggregateBinaryOperations(MatrixValue m1Value, MatrixValue m2Value, MatrixValue result, AggregateBinaryOperator op, boolean nativeMatMult)\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java",
"new_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java",
"diff": "@@ -56,6 +56,8 @@ public class NativeHelper {\nprivate static boolean attemptedLoading = false;\n+ private static String hintOnFailures = \"\";\n+\n// Performing loading in a method instead of a static block will throw a detailed stack trace in case of fatal errors\nprivate static void init() {\n// Only Linux supported for BLAS\n@@ -66,8 +68,6 @@ public class NativeHelper {\n// again and again especially in the parfor (hence the double-checking with synchronized).\nif(!attemptedLoading) {\nDMLConfig dmlConfig = ConfigurationManager.getDMLConfig();\n- String userSpecifiedBLAS = System.getenv(\"SYSTEMML_BLAS\");\n- userSpecifiedBLAS = (userSpecifiedBLAS == null) ? \"\" : userSpecifiedBLAS.trim().toLowerCase();\n// -------------------------------------------------------------------------------------\n// We allow BLAS to be enabled or disabled or explicitly selected in one of the two ways:\n// 1. DML Configuration: native.blas (boolean flag)\n@@ -76,13 +76,12 @@ public class NativeHelper {\n// The option 2 is useful for two reasons:\n// - Developer testing of different BLAS\n// - Provides fine-grained control. Certain machines could use mkl while others use openblas, etc.\n- boolean enabledViaConfig = (dmlConfig == null) ? true : dmlConfig.getBooleanValue(DMLConfig.NATIVE_BLAS);\n- boolean enabledViaEnvironmentVariable = userSpecifiedBLAS.equals(\"\") || userSpecifiedBLAS.equals(\"mkl\") || userSpecifiedBLAS.equals(\"openblas\");\n+ String userSpecifiedBLAS = (dmlConfig == null) ? \"auto\" : dmlConfig.getTextValue(DMLConfig.NATIVE_BLAS).trim().toLowerCase();\n- if(enabledViaConfig && enabledViaEnvironmentVariable) {\n+ if(userSpecifiedBLAS.equals(\"auto\") || userSpecifiedBLAS.equals(\"mkl\") || userSpecifiedBLAS.equals(\"openblas\")) {\nlong start = System.nanoTime();\nif(!supportedArchitectures.containsKey(SystemUtils.OS_ARCH)) {\n- LOG.warn(\"Unsupported architecture for native BLAS:\" + SystemUtils.OS_ARCH);\n+ LOG.info(\"Unsupported architecture for native BLAS:\" + SystemUtils.OS_ARCH);\nreturn;\n}\nsynchronized(NativeHelper.class) {\n@@ -92,23 +91,24 @@ public class NativeHelper {\n// By default, we will native.blas=true and we will attempt to load MKL first.\n// If MKL is not enabled then we try to load OpenBLAS.\n// If both MKL and OpenBLAS are not available we fall back to Java BLAS.\n- if(userSpecifiedBLAS.equalsIgnoreCase(\"\")) {\n+ if(userSpecifiedBLAS.equals(\"auto\")) {\nblasType = isMKLAvailable() ? \"mkl\" : isOpenBLASAvailable() ? \"openblas\" : null;\nif(blasType == null)\n- LOG.info(\"Unable to load either MKL or OpenBLAS. Please set \");\n+ LOG.info(\"Unable to load either MKL or OpenBLAS due to \" + hintOnFailures);\n}\n- else if(userSpecifiedBLAS.equalsIgnoreCase(\"mkl\")) {\n+ else if(userSpecifiedBLAS.equals(\"mkl\")) {\nblasType = isMKLAvailable() ? \"mkl\" : null;\nif(blasType == null)\n- LOG.info(\"Unable to load MKL\");\n+ LOG.info(\"Unable to load MKL due to \" + hintOnFailures);\n}\n- else if(userSpecifiedBLAS.equalsIgnoreCase(\"openblas\")) {\n+ else if(userSpecifiedBLAS.equals(\"openblas\")) {\nblasType = isOpenBLASAvailable() ? 
\"openblas\" : null;\nif(blasType == null)\n- LOG.info(\"Unable to load OpenBLAS\");\n+ LOG.info(\"Unable to load OpenBLAS due to \" + hintOnFailures);\n}\nelse {\n- LOG.info(\"Unsupported BLAS:\" + userSpecifiedBLAS);\n+ // Only thrown at development time.\n+ throw new RuntimeException(\"Unsupported BLAS:\" + userSpecifiedBLAS);\n}\n// =============================================================================\nif(blasType != null && loadLibraryHelper(\"libsystemml_\" + blasType + \"-Linux-x86_64.so\")) {\n@@ -141,14 +141,11 @@ public class NativeHelper {\n}\n}\ndouble timeToLoadInMilliseconds = (System.nanoTime()-start)*1e-6;\n- if(timeToLoadInMilliseconds > 100)\n+ if(timeToLoadInMilliseconds > 1000)\nLOG.warn(\"Time to load native blas: \" + timeToLoadInMilliseconds + \" milliseconds.\");\n}\nelse {\n- if(enabledViaConfig)\n- LOG.warn(\"Using internal Java BLAS as native BLAS support is disabled by the configuration 'native.blas'.\");\n- else\n- LOG.warn(\"Using internal Java BLAS as native BLAS support is disabled by the environment variable 'SYSTEMML_BLAS=\" + userSpecifiedBLAS + \"'.\");\n+ LOG.warn(\"Using internal Java BLAS as native BLAS support the configuration 'native.blas'=\" + userSpecifiedBLAS + \".\");\n}\nattemptedLoading = true;\n}\n@@ -180,6 +177,7 @@ public class NativeHelper {\n// Set environment variable MKL_THREADING_LAYER to GNU on Linux for performance\nif(!loadLibraryHelper(\"libpreload_systemml-Linux-x86_64.so\")) {\nLOG.debug(\"Unable to load preload_systemml (required for loading MKL-enabled SystemML library)\");\n+ hintOnFailures = hintOnFailures + \" libpreload_systemml-Linux-x86_64.so\";\nreturn false;\n}\n// The most reliable way in my investigation to ensure that MKL runs smoothly with OpenMP (used by conv2d*)\n@@ -204,6 +202,8 @@ public class NativeHelper {\nreturn true;\n}\ncatch (UnsatisfiedLinkError e) {\n+ if(!hintOnFailures.contains(blas))\n+ hintOnFailures = hintOnFailures + blas + \" \";\nif(optionalMsg != null)\nLOG.debug(\"Unable to load \" + blas + \"(\" + optionalMsg + \"):\" + e.getMessage());\nelse\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1580] Remove the usage of SYSTEMML_BLAS environment variable from NativeHelper |
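A condensed sketch of the selection logic this commit leaves behind: the single `native.blas` property now takes `auto`/`mkl`/`openblas`/`none` instead of a boolean plus the `SYSTEMML_BLAS` environment variable. The availability probes here are hypothetical stand-ins for NativeHelper's real library loading.

```java
public class BlasSelection {
  static boolean isMKLAvailable() { return false; }      // placeholder probe
  static boolean isOpenBLASAvailable() { return true; }  // placeholder probe

  static String selectBlas(String nativeBlas) {
    String s = nativeBlas.trim().toLowerCase();
    switch (s) {
      case "auto":     return isMKLAvailable() ? "mkl" : isOpenBLASAvailable() ? "openblas" : null;
      case "mkl":      return isMKLAvailable() ? "mkl" : null;
      case "openblas": return isOpenBLASAvailable() ? "openblas" : null;
      case "none":     return null; // fall back to the internal Java library
      default: throw new IllegalArgumentException("Unsupported BLAS: " + nativeBlas);
    }
  }

  public static void main(String[] args) {
    System.out.println(selectBlas("auto")); // "openblas" with these placeholder probes
  }
}
```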
49,736 | 03.05.2017 21:02:14 | 28,800 | 76f3ca5d39e492fc3075c4bd8240ec5339647001 | [HOTFIX] Bugfix for metadata of conv2d_* and maxpool_* operations | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java",
"diff": "@@ -242,30 +242,43 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\nswitch(op)\n{\ncase MAX_POOLING: {\n- ret[0] = getInput().get(0)._dim1;\n+ // input\n+ long N = getInput().get(0)._dim1;\n+ ret[0] = N;\nret[1] = getExtractedVal(params.C, params.P, params.Q);\nret[2] = -1;\nbreak;\n}\ncase DIRECT_CONV2D: {\n- ret[0] = getInput().get(0)._dim1;\n- ret[1] = getExtractedVal(getInput().get(1)._dim1, params.P, params.Q);\n+ // input, filter\n+ long N = getInput().get(0)._dim1;\n+ ret[0] = N;\n+ ret[1] = getExtractedVal(params.K, params.P, params.Q);\nret[2] = -1;\nbreak;\n}\ncase DIRECT_CONV2D_BACKWARD_FILTER: {\n- ret[0] = getInput().get(1)._dim1;\n- ret[1] = getInput().get(1)._dim2;\n+ // input, dout\n+ ret[0] = params.K;\n+ ret[1] = getExtractedVal(params.C, params.R, params.S);\nret[2] = -1;\nbreak;\n}\n- case MAX_POOLING_BACKWARD:\n- case DIRECT_CONV2D_BACKWARD_DATA: {\n+ case MAX_POOLING_BACKWARD: {\n+ // input, dout\nret[0] = getInput().get(0)._dim1;\nret[1] = getInput().get(0)._dim2;\nret[2] = -1;\nbreak;\n}\n+ case DIRECT_CONV2D_BACKWARD_DATA: {\n+ // filter, dout\n+ long N = getInput().get(1)._dim1;\n+ ret[0] = N;\n+ ret[1] = getExtractedVal(params.C, params.H, params.W);\n+ ret[2] = -1;\n+ break;\n+ }\ndefault:\nthrow new RuntimeException(\"Unsupported op:\" + op.name());\n}\n@@ -390,13 +403,16 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\n{\ncase MAX_POOLING:\n{\n- _dim1 = getInput().get(0)._dim1;\n+ // input\n+ long N = getInput().get(0)._dim1;\n+ _dim1 = N;\n_dim2 = getExtractedVal(params.C, params.P, params.Q);\n_nnz = -1; // cannot infer stats\nbreak;\n}\ncase MAX_POOLING_BACKWARD:\n{\n+ // input, dout\n_dim1 = getInput().get(0)._dim1;\n_dim2 = getInput().get(0)._dim2;\n_nnz = -1;\n@@ -404,22 +420,27 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\n}\ncase DIRECT_CONV2D:\n{\n- _dim1 = getInput().get(0)._dim1;\n- _dim2 = getExtractedVal(getInput().get(1)._dim1, params.P, params.Q);\n+ // input, filter\n+ long N = getInput().get(0)._dim1;\n+ _dim1 = N;\n+ _dim2 = getExtractedVal(params.K, params.P, params.Q);\n_nnz = -1; // cannot infer stats\nbreak;\n}\ncase DIRECT_CONV2D_BACKWARD_DATA:\n{\n- _dim1 = getInput().get(0)._dim1;\n- _dim2 = getInput().get(0)._dim2;\n+ // filter, dout\n+ long N = getInput().get(1)._dim1;\n+ _dim1 = N;\n+ _dim2 = getExtractedVal(params.C, params.H, params.W);\n_nnz = -1; // cannot infer stats\nbreak;\n}\ncase DIRECT_CONV2D_BACKWARD_FILTER:\n{\n- _dim1 = getInput().get(1)._dim1;\n- _dim2 = getInput().get(1)._dim2;\n+ // input, dout\n+ _dim1 = params.K;\n+ _dim2 = getExtractedVal(params.C, params.R, params.S);\n_nnz = -1; // cannot infer stats\nbreak;\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java",
"new_path": "src/main/java/org/apache/sysml/parser/DMLTranslator.java",
"diff": "@@ -25,6 +25,7 @@ import java.util.HashMap;\nimport java.util.Iterator;\nimport java.util.List;\n+import org.antlr.v4.parse.ANTLRParser.option_return;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.conf.ConfigurationManager;\n@@ -2785,7 +2786,12 @@ public class DMLTranslator\nthrow new ParseException(\"Unsupported builtin function type: \"+source.getOpCode());\n}\n+ if( !(source.getOpCode() == BuiltinFunctionOp.CONV2D || source.getOpCode() == BuiltinFunctionOp.CONV2D_BACKWARD_DATA ||\n+ source.getOpCode() == BuiltinFunctionOp.CONV2D_BACKWARD_FILTER || source.getOpCode() == BuiltinFunctionOp.MAX_POOL ||\n+ source.getOpCode() == BuiltinFunctionOp.MAX_POOL_BACKWARD) ) {\n+ // Since the dimension of output doesnot match that of input variable for these operations\nsetIdentifierParams(currBuiltinOp, source.getOutput());\n+ }\ncurrBuiltinOp.setAllPositions(source.getBeginLine(), source.getBeginColumn(), source.getEndLine(), source.getEndColumn());\nreturn currBuiltinOp;\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX] Bugfix for metadata of conv2d_* and maxpool_* operations |
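A small sketch of the size inference this fix encodes: conv2d produces an N x (K*P*Q) matrix, conv2d_backward_data an N x (C*H*W) matrix, and conv2d_backward_filter a K x (C*R*S) matrix, with P and Q computed by the standard convolution output formula. The concrete shapes below are illustrative.

```java
public class ConvDims {
  // standard convolution/pooling output dimension
  static long outDim(long in, long filter, long pad, long stride) {
    return (in + 2 * pad - filter) / stride + 1;
  }

  public static void main(String[] args) {
    long N = 64, C = 3, H = 32, W = 32;   // input:  N x (C*H*W)
    long K = 16, R = 5, S = 5;            // filter: K x (C*R*S)
    long padH = 0, padW = 0, strideH = 1, strideW = 1;
    long P = outDim(H, R, padH, strideH); // 28
    long Q = outDim(W, S, padW, strideW); // 28
    System.out.println("conv2d output: " + N + " x " + (K * P * Q));
    System.out.println("conv2d_backward_data output: " + N + " x " + (C * H * W));
    System.out.println("conv2d_backward_filter output: " + K + " x " + (C * R * S));
  }
}
```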
49,717 | 04.05.2017 16:26:47 | 25,200 | 2c5c3b14e1906cda70ae1581b19a5e908b3ab329 | [HOTFIX] Bug fix for solve, removed warnings and added instrumentation | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/GPUInstructionParser.java",
"diff": "@@ -35,9 +35,9 @@ import org.apache.sysml.runtime.instructions.gpu.AggregateUnaryGPUInstruction;\npublic class GPUInstructionParser extends InstructionParser\n{\n- public static final HashMap<String, GPUINSTRUCTION_TYPE> String2GPUInstructionType;\n+ static final HashMap<String, GPUINSTRUCTION_TYPE> String2GPUInstructionType;\nstatic {\n- String2GPUInstructionType = new HashMap<String, GPUINSTRUCTION_TYPE>();\n+ String2GPUInstructionType = new HashMap<>();\n// Neural Network Operators\nString2GPUInstructionType.put( \"relu_backward\", GPUINSTRUCTION_TYPE.Convolution);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/BuiltinBinaryGPUInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/BuiltinBinaryGPUInstruction.java",
"diff": "@@ -30,7 +30,9 @@ import org.apache.sysml.runtime.matrix.operators.Operator;\npublic abstract class BuiltinBinaryGPUInstruction extends GPUInstruction {\n+ @SuppressWarnings(\"unused\")\nprivate int _arity;\n+\nCPOperand output;\nCPOperand input1, input2;\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java",
"diff": "@@ -40,6 +40,10 @@ public abstract class GPUInstruction extends Instruction\npublic final static String MISC_TIMER_DEVICE_TO_DEVICE = \"D2D\"; // time spent in copying data from one region on the device to another\npublic final static String MISC_TIMER_SPARSE_TO_DENSE = \"s2d\"; // time spent in converting data from sparse to dense\npublic final static String MISC_TIMER_DENSE_TO_SPARSE = \"d2s\"; // time spent in converting data from dense to sparse\n+ public final static String MISC_TIMER_ROW_TO_COLUMN_MAJOR = \"r2c\"; // time spent in converting data from row major to column major\n+ public final static String MISC_TIMER_COLUMN_TO_ROW_MAJOR = \"c2r\"; // time spent in converting data from column major to row major\n+ public final static String MISC_TIMER_OBJECT_CLONE = \"clone\";// time spent in cloning (deep copying) a GPUObject instance\n+\npublic final static String MISC_TIMER_CUDA_FREE = \"f\"; // time spent in calling cudaFree\npublic final static String MISC_TIMER_ALLOCATE = \"a\"; // time spent to allocate memory on gpu\npublic final static String MISC_TIMER_ALLOCATE_DENSE_OUTPUT = \"ao\"; // time spent to allocate dense output (recorded differently than MISC_TIMER_ALLOCATE)\n@@ -58,6 +62,10 @@ public abstract class GPUInstruction extends Instruction\n// Other BLAS instructions\npublic final static String MISC_TIMER_DAXPY_LIB = \"daxpy\"; // time spent in daxpy\n+ public final static String MISC_TIMER_QR_BUFFER = \"qr_buffer\"; // time spent in calculating buffer needed to perform QR\n+ public final static String MISC_TIMER_QR = \"qr\"; // time spent in doing QR\n+ public final static String MISC_TIMER_ORMQR = \"ormqr\"; // time spent in ormqr\n+ public final static String MISC_TIMER_TRSM = \"trsm\"; // time spent in cublas Dtrsm\n// Transpose\npublic final static String MISC_TIMER_SPARSE_DGEAM_LIB = \"sdgeaml\"; // time spent in sparse transpose (and other ops of type a*op(A) + b*op(B))\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixMatrixBuiltinGPUInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/MatrixMatrixBuiltinGPUInstruction.java",
"diff": "@@ -45,6 +45,7 @@ public class MatrixMatrixBuiltinGPUInstruction extends BuiltinBinaryGPUInstructi\nMatrixObject mat2 = getMatrixInputForGPUInstruction(ec, input2.getName());\nif(opcode.equals(\"solve\")) {\n+ ec.setMetaData(output.getName(), mat1.getNumColumns(), 1);\nLibMatrixCUDA.solve(ec, ec.getGPUContext(), getExtendedOpcode(), mat1, mat2, output.getName());\n} else {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java",
"diff": "@@ -307,6 +307,8 @@ public class GPUContext {\nfreeList = new LinkedList<Pointer>();\nfreeCUDASpaceMap.put(size, freeList);\n}\n+ if (freeList.contains(toFree))\n+ throw new RuntimeException(\"GPU : Internal state corrupted, double free\");\nfreeList.add(toFree);\n}\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java",
"diff": "@@ -26,7 +26,6 @@ import static jcuda.jcudnn.cudnnDataType.CUDNN_DATA_DOUBLE;\nimport static jcuda.jcudnn.cudnnTensorFormat.CUDNN_TENSOR_NCHW;\nimport static jcuda.jcusparse.JCusparse.cusparseDdense2csr;\nimport static jcuda.jcusparse.JCusparse.cusparseDnnz;\n-import static jcuda.runtime.JCuda.cudaMalloc;\nimport static jcuda.runtime.JCuda.cudaMemcpy;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToDevice;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost;\n@@ -343,7 +342,7 @@ public class GPUObject {\n/**\n* Convenience method. Converts Column Major Dense Matrix to Row Major Dense Matrix\n- * @throws DMLRuntimeException\n+ * @throws DMLRuntimeException if error\n*/\npublic void denseColumnMajorToRowMajor() throws DMLRuntimeException {\nLOG.trace(\"GPU : dense Ptr row-major -> col-major on \" + this + \", GPUContext=\" + getGPUContext());\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"diff": "@@ -329,6 +329,7 @@ public class LibMatrixCUDA {\n* @return a sparse matrix pointer\n* @throws DMLRuntimeException if error occurs\n*/\n+ @SuppressWarnings(\"unused\")\nprivate static CSRPointer getSparsePointer(GPUContext gCtx, MatrixObject input, String instName) throws DMLRuntimeException {\nif(!isInSparseFormat(gCtx, input)) {\ninput.getGPUObject(gCtx).denseToSparse();\n@@ -2754,6 +2755,25 @@ public class LibMatrixCUDA {\nPointer betaPtr = pointerTo(beta);\nint transa = isLeftTransposed ? CUBLAS_OP_T : CUBLAS_OP_N;\nint transb = isRightTransposed ? CUBLAS_OP_T : CUBLAS_OP_N;\n+\n+ int lda = (int) in1.getNumColumns();\n+ int ldb = (int) in2.getNumColumns();\n+ int m = (int) in1.getNumColumns();\n+ int n = (int) in2.getNumRows();\n+ if (isLeftTransposed && isRightTransposed) {\n+ m = (int) in1.getNumRows();\n+ n = (int) in2.getNumColumns();\n+ }\n+ else if (isLeftTransposed) {\n+ m = (int) in1.getNumRows();\n+ } else if (isRightTransposed) {\n+ n = (int) in2.getNumColumns();\n+ }\n+ int ldc = m;\n+\n+\n+\n+ /**\nint m = (int) in1.getNumRows();\nint n = (int) in1.getNumColumns();\nif(!isLeftTransposed && isRightTransposed) {\n@@ -2763,6 +2783,7 @@ public class LibMatrixCUDA {\nint lda = isLeftTransposed ? n : m;\nint ldb = isRightTransposed ? n : m;\nint ldc = m;\n+ **/\nMatrixObject out = ec.getMatrixObject(outputName);\nboolean isSparse1 = isInSparseFormat(gCtx, in1);\n@@ -2963,8 +2984,10 @@ public class LibMatrixCUDA {\nthrow new DMLRuntimeException(\"GPU : Invalid internal state, the GPUContext set with the ExecutionContext is not the same used to run this LibMatrixCUDA function\");\n// x = solve(A, b)\n+ LOG.trace(\"GPU : solve\" + \", GPUContext=\" + gCtx);\n+\n+ long t0 = -1;\n- // Both Sparse\nif (!isInSparseFormat(gCtx, in1) && !isInSparseFormat(gCtx, in2)) { // Both dense\nGPUObject Aobj = in1.getGPUObject(gCtx);\nGPUObject bobj = in2.getGPUObject(gCtx);\n@@ -2980,26 +3003,41 @@ public class LibMatrixCUDA {\n// convert dense matrices to row major\n// Operation in cuSolver and cuBlas are for column major dense matrices\n// and are destructive to the original input\n+ if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\nGPUObject ATobj = (GPUObject) Aobj.clone();\n+ if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_OBJECT_CLONE, System.nanoTime() - t0);\n+ if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\nATobj.denseRowMajorToColumnMajor();\n+ if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_ROW_TO_COLUMN_MAJOR, System.nanoTime() - t0);\nPointer A = ATobj.getJcudaDenseMatrixPtr();\n+ if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\nGPUObject bTobj = (GPUObject) bobj.clone();\n+ if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_OBJECT_CLONE, System.nanoTime() - t0);\n+ if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\nbTobj.denseRowMajorToColumnMajor();\n+ if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_ROW_TO_COLUMN_MAJOR, System.nanoTime() - t0);\n+\nPointer b = bTobj.getJcudaDenseMatrixPtr();\n// The following set of operations is done following the example in the cusolver documentation\n// http://docs.nvidia.com/cuda/cusolver/#ormqr-example1\n// step 3: query working space of geqrf and ormqr\n+ if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\nint[] lwork = 
{0};\nJCusolverDn.cusolverDnDgeqrf_bufferSize(gCtx.getCusolverDnHandle(), m, n, A, m, lwork);\n+ if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_QR_BUFFER, System.nanoTime() - t0);\n+\n// step 4: compute QR factorization\n- Pointer work = gCtx.allocate(lwork[0] * Sizeof.DOUBLE);\n- Pointer tau = gCtx.allocate(Math.max(m, m) * Sizeof.DOUBLE);\n+ Pointer work = gCtx.allocate(instName, lwork[0] * Sizeof.DOUBLE);\n+ Pointer tau = gCtx.allocate(instName, Math.max(m, m) * Sizeof.DOUBLE);\nPointer devInfo = gCtx.allocate(Sizeof.INT);\n+ if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\nJCusolverDn.cusolverDnDgeqrf(gCtx.getCusolverDnHandle(), m, n, A, m, tau, work, lwork[0], devInfo);\n+ if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_QR, System.nanoTime() - t0);\n+\nint[] qrError = {-1};\ncudaMemcpy(Pointer.to(qrError), devInfo, Sizeof.INT, cudaMemcpyDeviceToHost);\n@@ -3008,27 +3046,32 @@ public class LibMatrixCUDA {\n}\n// step 5: compute Q^T*B\n+ if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\nJCusolverDn.cusolverDnDormqr(gCtx.getCusolverDnHandle(), cublasSideMode.CUBLAS_SIDE_LEFT, cublasOperation.CUBLAS_OP_T, m, 1, n, A, m, tau, b, m, work, lwork[0], devInfo);\n+ if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_ORMQR, System.nanoTime() - t0);\ncudaMemcpy(Pointer.to(qrError), devInfo, Sizeof.INT, cudaMemcpyDeviceToHost);\nif (qrError[0] != 0) {\nthrow new DMLRuntimeException(\"GPU : Error in call to ormqr (to compuete Q^T*B after QR factorization) as part of solve, argument \" + qrError[0] + \" was wrong\");\n}\n// step 6: compute x = R \\ Q^T*B\n+ if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\nJCublas2.cublasDtrsm(gCtx.getCublasHandle(),\ncublasSideMode.CUBLAS_SIDE_LEFT, cublasFillMode.CUBLAS_FILL_MODE_UPPER, cublasOperation.CUBLAS_OP_N, cublasDiagType.CUBLAS_DIAG_NON_UNIT,\nn, 1, pointerTo(1.0), A, m, b, m);\n+ if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_TRSM, System.nanoTime() - t0);\n+ if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\nbTobj.denseColumnMajorToRowMajor();\n+ if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_COLUMN_TO_ROW_MAJOR, System.nanoTime() - t0);\n// TODO : Find a way to assign bTobj directly to the output and set the correct flags so as to not crash\n// There is an avoidable copy happening here\nMatrixObject out = getDenseMatrixOutputForGPUInstruction(ec, instName, outputName);\ncudaMemcpy(out.getGPUObject(gCtx).getJcudaDenseMatrixPtr(), bTobj.getJcudaDenseMatrixPtr(), n * 1 * Sizeof.DOUBLE, cudaMemcpyDeviceToDevice);\n- gCtx.cudaFreeHelper(work);\n- gCtx.cudaFreeHelper(tau);\n- gCtx.cudaFreeHelper(tau);\n+ gCtx.cudaFreeHelper(instName, work);\n+ gCtx.cudaFreeHelper(instName, tau);\nATobj.clearData();\nbTobj.clearData();\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX] Bug fix for solve, removed warnings and added instrumentation |
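As a CPU-side reference for the geqrf/ormqr/trsm sequence above, the same least-squares solve can be expressed with Apache Commons Math (already a SystemML dependency): factor A = QR, form Q^T b, then back-substitute through R, which is exactly what the solver does internally. The 3x2 system below is illustrative.

```java
import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.QRDecomposition;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.linear.RealVector;

public class QrSolveExample {
  public static void main(String[] args) {
    RealMatrix A = new Array2DRowRealMatrix(new double[][] {
        {1, 1}, {1, 2}, {1, 3}});                  // overdetermined system, m > n
    RealVector b = new ArrayRealVector(new double[] {1, 2, 2});
    // QR-based solver returns the least-squares solution x = argmin ||Ax - b||
    RealVector x = new QRDecomposition(A).getSolver().solve(b);
    System.out.println("x = " + x);
  }
}
```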
49,772 | 05.05.2017 13:45:23 | 25,200 | abc9686fbaaa11c12cfa02c49c7675165acdf176 | [MINOR] Adding documentation to IPA functions. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ipa/InterProceduralAnalysis.java",
"new_path": "src/main/java/org/apache/sysml/hops/ipa/InterProceduralAnalysis.java",
"diff": "@@ -461,6 +461,19 @@ public class InterProceduralAnalysis\n// INTRA-PROCEDURE ANALYSIS\n//////\n+ /**\n+ * Perform intra-procedural analysis (IPA) by propagating statistics\n+ * across statement blocks.\n+ *\n+ * @param sb DML statement blocks.\n+ * @param fcand Function candidates.\n+ * @param callVars Map of variables eligible for propagation.\n+ * @param fcandSafeNNZ Function candidate safe non-zeros.\n+ * @param unaryFcands Unary function candidates.\n+ * @param fnStack Function stack to determine current scope.\n+ * @throws HopsException If a HopsException occurs.\n+ * @throws ParseException If a ParseException occurs.\n+ */\nprivate void propagateStatisticsAcrossBlock( StatementBlock sb, Map<String, Integer> fcand, LocalVariableMap callVars, Map<String, Set<Long>> fcandSafeNNZ, Set<String> unaryFcands, Set<String> fnStack )\nthrows HopsException, ParseException\n{\n@@ -552,9 +565,15 @@ public class InterProceduralAnalysis\n*\n* This replaces scalar reads and typecasts thereof with literals.\n*\n+ * Ultimately, this leads to improvements because the size\n+ * expression evaluation over DAGs with scalar symbol table entries\n+ * (which is also applied during IPA) is limited to supported\n+ * operations, whereas literal replacement is a brute force method\n+ * that applies to all (including future) operations.\n+ *\n* @param roots List of HOPs.\n* @param vars Map of variables eligible for propagation.\n- * @throws HopsException\n+ * @throws HopsException If a HopsException occurs.\n*/\nprivate void propagateScalarsAcrossDAG(ArrayList<Hop> roots, LocalVariableMap vars)\nthrows HopsException\n@@ -590,6 +609,13 @@ public class InterProceduralAnalysis\n}\n}\n+ /**\n+ * Propagate matrix sizes across DAGs.\n+ *\n+ * @param roots List of HOP DAG root nodes.\n+ * @param vars Map of variables eligible for propagation.\n+ * @throws HopsException If a HopsException occurs.\n+ */\nprivate void propagateStatisticsAcrossDAG( ArrayList<Hop> roots, LocalVariableMap vars )\nthrows HopsException\n{\n@@ -616,6 +642,21 @@ public class InterProceduralAnalysis\n// INTER-PROCEDURE ANALYIS\n//////\n+ /**\n+ * Propagate statistics from the calling program into a function\n+ * block.\n+ *\n+ * @param prog The DML program.\n+ * @param roots List of HOP DAG root notes for propagation.\n+ * @param fcand Function candidates.\n+ * @param callVars Calling program's map of variables eligible for\n+ * propagation.\n+ * @param fcandSafeNNZ Function candidate safe non-zeros.\n+ * @param unaryFcands Unary function candidates.\n+ * @param fnStack Function stack to determine current scope.\n+ * @throws HopsException If a HopsException occurs.\n+ * @throws ParseException If a ParseException occurs.\n+ */\nprivate void propagateStatisticsIntoFunctions(DMLProgram prog, ArrayList<Hop> roots, Map<String, Integer> fcand, LocalVariableMap callVars, Map<String, Set<Long>> fcandSafeNNZ, Set<String> unaryFcands, Set<String> fnStack )\nthrows HopsException, ParseException\n{\n@@ -623,6 +664,21 @@ public class InterProceduralAnalysis\npropagateStatisticsIntoFunctions(prog, root, fcand, callVars, fcandSafeNNZ, unaryFcands, fnStack);\n}\n+ /**\n+ * Propagate statistics from the calling program into a function\n+ * block.\n+ *\n+ * @param prog The DML program.\n+ * @param hop HOP to propagate statistics into.\n+ * @param fcand Function candidates.\n+ * @param callVars Calling program's map of variables eligible for\n+ * propagation.\n+ * @param fcandSafeNNZ Function candidate safe non-zeros.\n+ * @param unaryFcands Unary function 
candidates.\n+ * @param fnStack Function stack to determine current scope.\n+ * @throws HopsException If a HopsException occurs.\n+ * @throws ParseException If a ParseException occurs.\n+ */\nprivate void propagateStatisticsIntoFunctions(DMLProgram prog, Hop hop, Map<String, Integer> fcand, LocalVariableMap callVars, Map<String, Set<Long>> fcandSafeNNZ, Set<String> unaryFcands, Set<String> fnStack )\nthrows HopsException, ParseException\n{\n@@ -733,6 +789,19 @@ public class InterProceduralAnalysis\n}\n}\n+ /**\n+ * Extract return variable statistics from this function into the\n+ * calling program.\n+ *\n+ * @param fstmt The function statement.\n+ * @param fop The function op.\n+ * @param tmpVars Function's map of variables eligible for\n+ * extraction.\n+ * @param callVars Calling program's map of variables.\n+ * @param overwrite Whether or not to overwrite variables in the\n+ * calling program's variable map.\n+ * @throws HopsException If a HopsException occurs.\n+ */\nprivate void extractFunctionCallReturnStatistics( FunctionStatement fstmt, FunctionOp fop, LocalVariableMap tmpVars, LocalVariableMap callVars, boolean overwrite )\nthrows HopsException\n{\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/recompile/Recompiler.java",
"new_path": "src/main/java/org/apache/sysml/hops/recompile/Recompiler.java",
"diff": "@@ -1312,11 +1312,18 @@ public class Recompiler\n}\n+ /**\n+ * Remove any scalar variables from the variable map if the variable\n+ * is updated in this block.\n+ *\n+ * @param callVars Map of variables eligible for propagation.\n+ * @param sb DML statement block.\n+ */\npublic static void removeUpdatedScalars( LocalVariableMap callVars, StatementBlock sb )\n{\nif( sb != null )\n{\n- //remove update scalar variables from constants\n+ //remove updated scalar variables from constants\nfor( String varname : sb.variablesUpdated().getVariables().keySet() )\n{\nData dat = callVars.get(varname);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Adding documentation to IPA functions. |
49,736 | 05.05.2017 13:53:35 | 28,800 | b9814ccf0c024828d3f9c3e92b505ffb73ad73a1 | Additional tests to compare the accuracy of different convolution related operators with CuDNN
Closes | [
{
"change_type": "MODIFY",
"old_path": "pom.xml",
"new_path": "pom.xml",
"diff": "<exclude>perftest</exclude>\n<exclude>staging/**/*</exclude>\n<exclude>staging</exclude>\n+ <exclude>nn/test/**/*</exclude>\n<!-- <exclude>*.sh</exclude> --> <!-- applies to sparkDML.sh -->\n</excludes>\n<targetPath>scripts</targetPath>\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/README.md",
"diff": "+<!--\n+{% comment %}\n+Licensed to the Apache Software Foundation (ASF) under one or more\n+contributor license agreements. See the NOTICE file distributed with\n+this work for additional information regarding copyright ownership.\n+The ASF licenses this file to you under the Apache License, Version 2.0\n+(the \"License\"); you may not use this file except in compliance with\n+the License. You may obtain a copy of the License at\n+\n+http://www.apache.org/licenses/LICENSE-2.0\n+\n+Unless required by applicable law or agreed to in writing, software\n+distributed under the License is distributed on an \"AS IS\" BASIS,\n+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+See the License for the specific language governing permissions and\n+limitations under the License.\n+{% endcomment %}\n+-->\n+\n+# Built-in functions Tests\n+\n+The scripts in this folder tests the convolutions and maxpooling built-in functions\n+by comparing the CPU implementation v/s GPU implementation.\n+These scripts allows the developer to test different CPU implementation (such\n+as sparse data and dense filter, dense data and sparse filter, etc)\n+with MKL, OpenBLAS and Java.\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/compare.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read($1)\n+Y = read($2)\n+msg = ifdef($3, \" \")\n+eps = 1e-3\n+num_mismatch = sum(abs(X - Y) > eps)\n+if(num_mismatch > 0) {\n+ print(\"---------------------------------------------------\\nERROR: >>>>>>>>> The results don't match(num_mismatch:\" + num_mismatch + \"): \" + msg + \"\\n---------------------------------------------------\")\n+ Z = abs(X - Y) > eps\n+ print(\"X=\" + toString(X*Z))\n+ print(\"Y=\" + toString(Y*Z))\n+\n+}\n+else {\n+ print(\"The results match: \" + msg)\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/gen_conv2d.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = rand(rows=$N, cols=$C*$H*$W, sparsity=$sp, min=-0.5, max=1)\n+w = rand(rows=$F, cols=$C*$Hf*$Wf, sparsity=$sp, min=-0.5, max=1)\n+write(X, \"input.mtx\", format=\"binary\")\n+write(w, \"filter.mtx\", format=\"binary\")\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/gen_conv2d_bwd_data.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+Hout = floor(($H + 2 * $pad - $Hf) / $stride) + 1\n+Wout = floor(($W + 2 * $pad - $Wf) / $stride) + 1\n+\n+w = rand(rows=$F, cols=$C*$Hf*$Wf, sparsity=$sp, min=-0.5, max=1)\n+dout = rand(rows=$N, cols=$F*Hout*Wout, sparsity=$sp, min=-0.5, max=1)\n+\n+write(w, \"filter.mtx\", format=\"binary\")\n+write(dout, \"dout.mtx\", format=\"binary\")\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/gen_conv2d_bwd_filter.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+Hout = floor(($H + 2 * $pad - $Hf) / $stride) + 1\n+Wout = floor(($W + 2 * $pad - $Wf) / $stride) + 1\n+\n+X = rand(rows=$N, cols=$C*$H*$W, sparsity=$sp, min=-0.5, max=1)\n+dout = rand(rows=$N, cols=$F*Hout*Wout, sparsity=$sp, min=-0.5, max=1)\n+\n+write(X, \"input.mtx\", format=\"binary\")\n+write(dout, \"dout.mtx\", format=\"binary\")\n+\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/gen_maxpool.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = rand(rows=$N, cols=$C*$H*$W, sparsity=$sp, min=-0.5, max=1)\n+write(X, \"input.mtx\", format=\"binary\")\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/run_tests.sh",
"diff": "+#!/usr/bin/bash\n+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+# Additional tests to compare the accuracy of different convolution related operators with CuDNN\n+./test_conv2d_bwd_filter.sh\n+./test_conv2d_bwd_data.sh\n+./test_conv2d.sh\n+./test_maxpool.sh\n\\ No newline at end of file\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/test_conv2d.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read(\"input.mtx\")\n+w = read(\"filter.mtx\")\n+out = conv2d(X, w, input_shape=[$N,$C,$H,$W], filter_shape=[$F, $C, $Hf, $Wf], stride=[$stride,$stride], padding=[$pad,$pad])\n+write(out, $out, format=\"csv\")\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/test_conv2d.sh",
"diff": "+#!/usr/bin/bash\n+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+jars='.'\n+os_suffix='linux-x86_64'\n+version='0.8.0'\n+\n+# Downloads the jcuda jars\n+for lib in jcuda jcublas jcufft jcusparse jcusolver jcurand jnvgraph jcudnn\n+do\n+ file=$lib'-'$version'.jar'\n+ if [ ! -f $file ]; then\n+ url='https://search.maven.org/remotecontent?filepath=org/jcuda/'$lib'/'$version'/'$file\n+ wget -O $file $url\n+ fi\n+ jars=$jars','$file\n+\n+ file=$lib'-natives-'$version'-'$os_suffix'.jar'\n+ if [ ! -f $file ]; then\n+ url='https://search.maven.org/remotecontent?filepath=org/jcuda/'$lib'-natives/'$version'/'$file\n+ wget -O $file $url\n+ fi\n+ jars=$jars','$file\n+done\n+\n+# N = Number of images, C = number of channels, H = height, W = width\n+# F = number of filters, Hf = filter height, Wf = filter width\n+N=5\n+C=3\n+H=28\n+W=28\n+F=32\n+Hf=3\n+Wf=3\n+for sparsity in 0.1 0.2 0.5 0.6 0.9\n+do\n+ # Generating the data\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f gen_conv2d.dml -nvargs sp=$sparsity N=$N C=$C H=$H W=$W F=$F Hf=$Hf Wf=$Wf\n+ for stride in 1 2 3\n+ do\n+ for pad in 0 1 2\n+ do\n+ # Running a test in CPU mode\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f test_conv2d.dml -nvargs stride=$stride pad=$pad out=out_cp.csv N=$N C=$C H=$H W=$W F=$F Hf=$Hf Wf=$Wf\n+ # Running a test in GPU mode\n+ $SPARK_HOME/bin/spark-submit --jars $jars SystemML.jar -f test_conv2d.dml -stats -gpu force -nvargs stride=$stride pad=$pad out=out_gpu.csv N=$N C=$C H=$H W=$W F=$F Hf=$Hf Wf=$Wf\n+ # Comparing the CPU vs GPU results to make sure they are the same\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f compare.dml -args out_cp.csv out_gpu.csv \"conv2d:stride=\"$stride\",pad=\"$pad\",sparsity=\"$sparsity\n+ rm -rf out_cp.csv out_gpu.csv out_cp.csv.mtd out_gpu.csv.mtd\n+ done\n+ done\n+ rm -rf input.mtx input.mtx.mtd\n+done\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/test_conv2d_bwd_data.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+w = read(\"filter.mtx\")\n+dout = read(\"dout.mtx\")\n+out = conv2d_backward_data(w, dout, input_shape=[$N,$C,$H,$W], filter_shape=[$F, $C, $Hf, $Wf], stride=[$stride,$stride], padding=[$pad,$pad])\n+write(out, $out, format=\"csv\")\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/test_conv2d_bwd_data.sh",
"diff": "+#!/usr/bin/bash\n+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+jars='.'\n+os_suffix='linux-x86_64'\n+version='0.8.0'\n+\n+# Downloads the jcuda jars\n+for lib in jcuda jcublas jcufft jcusparse jcusolver jcurand jnvgraph jcudnn\n+do\n+ file=$lib'-'$version'.jar'\n+ if [ ! -f $file ]; then\n+ url='https://search.maven.org/remotecontent?filepath=org/jcuda/'$lib'/'$version'/'$file\n+ wget -O $file $url\n+ fi\n+ jars=$jars','$file\n+\n+ file=$lib'-natives-'$version'-'$os_suffix'.jar'\n+ if [ ! -f $file ]; then\n+ url='https://search.maven.org/remotecontent?filepath=org/jcuda/'$lib'-natives/'$version'/'$file\n+ wget -O $file $url\n+ fi\n+ jars=$jars','$file\n+done\n+\n+# N = Number of images, C = number of channels, H = height, W = width\n+# F = number of filters, Hf = filter height, Wf = filter width\n+N=5\n+C=3\n+H=28\n+W=28\n+F=32\n+Hf=3\n+Wf=3\n+for sparsity in 0.1 0.2 0.5 0.6 0.9\n+do\n+ for stride in 1 2 3\n+ do\n+ for pad in 0 1 2\n+ do\n+ # Generating the data\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f gen_conv2d_bwd_data.dml -nvargs sp=$sparsity N=$N C=$C H=$H W=$W F=$F Hf=$Hf Wf=$Wf stride=$stride pad=$pad\n+ # Running a test in CPU mode\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f test_conv2d_bwd_data.dml -nvargs stride=$stride pad=$pad out=out_cp.csv N=$N C=$C H=$H W=$W F=$F Hf=$Hf Wf=$Wf\n+ # Running a test in GPU mode\n+ $SPARK_HOME/bin/spark-submit --jars $jars SystemML.jar -f test_conv2d_bwd_data.dml -stats -gpu force -nvargs stride=$stride pad=$pad out=out_gpu.csv N=$N C=$C H=$H W=$W F=$F Hf=$Hf Wf=$Wf\n+ # Comparing the CPU vs GPU results to make sure they are the same\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f compare.dml -args out_cp.csv out_gpu.csv \"conv2d_backward_data:stride=\"$stride\",pad=\"$pad\",sparsity=\"$sparsity\n+ rm -rf out_cp.csv out_gpu.csv out_cp.csv.mtd out_gpu.csv.mtd\n+ done\n+ done\n+ rm -rf input.mtx input.mtx.mtd\n+done\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/test_conv2d_bwd_filter.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read(\"input.mtx\")\n+dout = read(\"dout.mtx\")\n+out = conv2d_backward_filter(X, dout, input_shape=[$N,$C,$H,$W], filter_shape=[$F, $C, $Hf, $Wf], stride=[$stride,$stride], padding=[$pad,$pad])\n+write(out, $out, format=\"csv\")\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/test_conv2d_bwd_filter.sh",
"diff": "+#!/usr/bin/bash\n+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+jars='.'\n+os_suffix='linux-x86_64'\n+version='0.8.0'\n+\n+# Downloads the jcuda jars\n+for lib in jcuda jcublas jcufft jcusparse jcusolver jcurand jnvgraph jcudnn\n+do\n+ file=$lib'-'$version'.jar'\n+ if [ ! -f $file ]; then\n+ url='https://search.maven.org/remotecontent?filepath=org/jcuda/'$lib'/'$version'/'$file\n+ wget -O $file $url\n+ fi\n+ jars=$jars','$file\n+\n+ file=$lib'-natives-'$version'-'$os_suffix'.jar'\n+ if [ ! -f $file ]; then\n+ url='https://search.maven.org/remotecontent?filepath=org/jcuda/'$lib'-natives/'$version'/'$file\n+ wget -O $file $url\n+ fi\n+ jars=$jars','$file\n+done\n+\n+# N = Number of images, C = number of channels, H = height, W = width\n+# F = number of filters, Hf = filter height, Wf = filter width\n+N=5\n+C=3\n+H=28\n+W=28\n+F=32\n+Hf=3\n+Wf=3\n+for sparsity in 0.1 0.2 0.5 0.6 0.9\n+do\n+ for stride in 1 2 3\n+ do\n+ for pad in 0 1 2\n+ do\n+ # Generating the data\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f gen_conv2d_bwd_filter.dml -nvargs sp=$sparsity N=$N C=$C H=$H W=$W F=$F Hf=$Hf Wf=$Wf stride=$stride pad=$pad\n+ # Running a test in CPU mode\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f test_conv2d_bwd_filter.dml -nvargs stride=$stride pad=$pad out=out_cp.csv N=$N C=$C H=$H W=$W F=$F Hf=$Hf Wf=$Wf\n+ # Running a test in GPU mode\n+ $SPARK_HOME/bin/spark-submit --jars $jars SystemML.jar -f test_conv2d_bwd_filter.dml -stats -gpu force -nvargs stride=$stride pad=$pad out=out_gpu.csv N=$N C=$C H=$H W=$W F=$F Hf=$Hf Wf=$Wf\n+ # Comparing the CPU vs GPU results to make sure they are the same\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f compare.dml -args out_cp.csv out_gpu.csv \"conv2d_backward_filter:stride=\"$stride\",pad=\"$pad\",sparsity=\"$sparsity\n+ rm -rf out_cp.csv out_gpu.csv out_cp.csv.mtd out_gpu.csv.mtd\n+ done\n+ done\n+ rm -rf input.mtx input.mtx.mtd\n+done\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/test_maxpool.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read(\"input.mtx\")\n+X = max(X, 0)\n+out = max_pool(X, input_shape=[$N,$C,$H,$W], pool_size=[$pool,$pool], stride=[$stride,$stride], padding=[$pad,$pad])\n+write(out, $out, format=\"csv\")\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/test/compare_backends/test_maxpool.sh",
"diff": "+#!/usr/bin/bash\n+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+jars='.'\n+os_suffix='linux-x86_64'\n+version='0.8.0'\n+\n+# Downloads the jcuda jars\n+for lib in jcuda jcublas jcufft jcusparse jcusolver jcurand jnvgraph jcudnn\n+do\n+ file=$lib'-'$version'.jar'\n+ if [ ! -f $file ]; then\n+ url='https://search.maven.org/remotecontent?filepath=org/jcuda/'$lib'/'$version'/'$file\n+ wget -O $file $url\n+ fi\n+ jars=$jars','$file\n+\n+ file=$lib'-natives-'$version'-'$os_suffix'.jar'\n+ if [ ! -f $file ]; then\n+ url='https://search.maven.org/remotecontent?filepath=org/jcuda/'$lib'-natives/'$version'/'$file\n+ wget -O $file $url\n+ fi\n+ jars=$jars','$file\n+done\n+\n+# N = Number of images, C = number of channels, H = height, W = width\n+N=5\n+C=3\n+H=28\n+W=28\n+for sparsity in 0.1 0.2 0.5 0.6 0.9\n+do\n+ # Generating the data\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f gen_maxpool.dml -nvargs sp=$sparsity N=$N C=$C H=$H W=$W\n+ for stride in 1 2 3\n+ do\n+ for pad in 0 1 2\n+ do\n+ # Running a test in CPU mode\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f test_maxpool.dml -nvargs stride=$stride pad=$pad out=out_cp.csv N=$N C=$C H=$H W=$W pool=3\n+ # Running a test in GPU mode\n+ $SPARK_HOME/bin/spark-submit --jars $jars SystemML.jar -f test_maxpool.dml -stats -gpu force -nvargs stride=$stride pad=$pad out=out_gpu.csv N=$N C=$C H=$H W=$W pool=3\n+ # Comparing the CPU vs GPU results to make sure they are the same\n+ $SPARK_HOME/bin/spark-submit SystemML.jar -f compare.dml -args out_cp.csv out_gpu.csv \"maxpool:stride=\"$stride\",pad=\"$pad\n+ rm -rf out_cp.csv out_gpu.csv out_cp.csv.mtd out_gpu.csv.mtd\n+ done\n+ done\n+ rm -rf input.mtx input.mtx.mtd\n+done\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-540] Additional tests to compare the accuracy of different convolution related operators with CuDNN
Closes #477. |
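Note: the pass/fail criterion in compare.dml above is an elementwise tolerance check, num_mismatch = sum(abs(X - Y) > eps) with eps = 1e-3. Below is a minimal standalone Java sketch of that same check; plain 2D arrays stand in for SystemML matrices, only the 1e-3 tolerance is taken from the script, and all class/variable names are illustrative:

```java
// Hedged sketch of compare.dml's elementwise tolerance check:
// num_mismatch = sum(abs(X - Y) > eps)
public class CompareBackendsSketch {
    static int countMismatches(double[][] x, double[][] y, double eps) {
        int numMismatch = 0;
        for (int i = 0; i < x.length; i++)
            for (int j = 0; j < x[i].length; j++)
                if (Math.abs(x[i][j] - y[i][j]) > eps)
                    numMismatch++;
        return numMismatch;
    }

    public static void main(String[] args) {
        double[][] cpuOut = {{1.0, 2.0}, {3.0, 4.0}};      // e.g. CPU conv2d result
        double[][] gpuOut = {{1.0, 2.0005}, {3.0, 4.01}};  // e.g. GPU (CuDNN) result
        int n = countMismatches(cpuOut, gpuOut, 1e-3);     // eps as in compare.dml
        System.out.println(n > 0
            ? "ERROR: The results don't match (num_mismatch: " + n + ")"
            : "The results match");
    }
}
```

For these toy inputs only the 4.01 vs 4.0 entry exceeds the tolerance, so one mismatch is reported and the error branch prints.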
49,736 | 05.05.2017 15:32:12 | 28,800 | 44d7a88576dccce0bc2262588eb25cb521ca739d | [MINOR] Added SGDNesterovUpdate UDF support to Caffe2DML
This will allow us to extrapolate the performance gains using codegen.
The UDF is disabled by default and is enabled only after setting `export
USE_NESTEROV_UDF="true"`. We will remove this flag and also the class
org.apache.sysml.udf.lib.SGDNesterovUpdate after codegen is stable. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/udf/lib/SGDNesterovUpdate.java",
"new_path": "src/main/java/org/apache/sysml/udf/lib/SGDNesterovUpdate.java",
"diff": "@@ -68,6 +68,10 @@ public class SGDNesterovUpdate extends PackageFunction {\nthrow new RuntimeException(\"Invalid function output being requested\");\n}\n+ boolean isDense(MatrixBlock X) {\n+ return !X.isInSparseFormat() && X.getDenseBlock() != null;\n+ }\n+\n@Override\npublic void execute() {\ntry {\n@@ -81,19 +85,53 @@ public class SGDNesterovUpdate extends PackageFunction {\nupdatedV = new Matrix( \"tmp_\" + rand.nextLong(), v.getNumRows(), v.getNumColumns(), ValueType.Double );\nMatrixBlock updatedVMB = allocateDenseMatrixBlock(updatedV);\ndouble [] updatedVData = updatedVMB.getDenseBlock();\n+ if(isDense(v) && isDense(dX)) {\n+ double [] vArr = v.getDenseBlock();\n+ double [] dXArr = dX.getDenseBlock();\n+ int nnz = 0;\n+ for(int i = 0; i < updatedVData.length; i++) {\n+ updatedVData[i] = mu*vArr[i] - lr*dXArr[i];\n+ nnz += (updatedVData[i]!=0) ? 1 : 0;\n+ }\n+ updatedVMB.setNonZeros(nnz);\n+ }\n+ else {\nmultiplyByConstant(v, mu, updatedVData);\nmultiplyByConstant(dX, -lr, updatedVData);\n- updatedVMB.setNonZeros(-1); // rather than updatedVMB.recomputeNonZeros();\n+ updatedVMB.recomputeNonZeros();\n+ }\nupdatedV.setMatrixDoubleArray(updatedVMB, OutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo);\n// X = X - mu * v_prev + (1 + mu) * v\nupdatedX = new Matrix( \"tmp_\" + rand.nextLong(), X.getNumRows(), X.getNumColumns(), ValueType.Double );\nMatrixBlock updatedXMB = allocateDenseMatrixBlock(updatedX);\ndouble [] updatedXData = updatedXMB.getDenseBlock();\n+ if(isDense(X) && isDense(v)) {\n+ double [] XArr = X.getDenseBlock();\n+ double [] vPrevArr = v.getDenseBlock();\n+ int nnz = 0; double muPlus1 = mu+1;\n+ for(int i = 0; i < updatedXData.length; i++) {\n+ updatedXData[i] = XArr[i] - mu*vPrevArr[i] + muPlus1*updatedVData[i];\n+ nnz += (updatedXData[i]!=0) ? 1 : 0;\n+ }\n+ updatedXMB.setNonZeros(nnz);\n+ }\n+ else if(isDense(v)) {\n+ copy(X, updatedXData);\n+ double [] vPrevArr = v.getDenseBlock();\n+ int nnz = 0; double muPlus1 = mu+1;\n+ for(int i = 0; i < updatedXData.length; i++) {\n+ updatedXData[i] += - mu*vPrevArr[i] + muPlus1*updatedVData[i];\n+ nnz += (updatedXData[i]!=0) ? 1 : 0;\n+ }\n+ updatedXMB.setNonZeros(nnz);\n+ }\n+ else {\ncopy(X, updatedXData);\nmultiplyByConstant(v, -mu, updatedXData);\nmultiplyByConstant(updatedVData, 1+mu, updatedXData);\n- updatedXMB.setNonZeros(-1); // rather than updatedXMB.recomputeNonZeros();\n+ updatedXMB.recomputeNonZeros();\n+ }\nupdatedX.setMatrixDoubleArray(updatedXMB, OutputInfo.BinaryBlockOutputInfo, InputInfo.BinaryBlockInputInfo);\n((Matrix) getFunctionInput(0)).getMatrixObject().release();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala",
"new_path": "src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala",
"diff": "@@ -61,6 +61,13 @@ object Caffe2DML {\n// Naming conventions:\nval X = \"X\"; val y = \"y\"; val batchSize = \"BATCH_SIZE\"; val numImages = \"num_images\"; val numValidationImages = \"num_validation\"\nval XVal = \"X_val\"; val yVal = \"y_val\"\n+\n+ var USE_NESTEROV_UDF = {\n+ // Developer environment variable flag 'USE_NESTEROV_UDF' until codegen starts working.\n+ // Then, we will remove this flag and also the class org.apache.sysml.udf.lib.SGDNesterovUpdate\n+ val envFlagNesterovUDF = System.getenv(\"USE_NESTEROV_UDF\")\n+ envFlagNesterovUDF != null && envFlagNesterovUDF.toBoolean\n+ }\n}\nclass Caffe2DML(val sc: SparkContext, val solverParam:Caffe.SolverParameter,\n@@ -283,6 +290,10 @@ class Caffe2DML(val sc: SparkContext, val solverParam:Caffe.SolverParameter,\nsource(net, solver, Array[String](\"l2_reg\"))\nappendVisualizationHeaders(dmlScript, numTabs)\n+ if(Caffe2DML.USE_NESTEROV_UDF) {\n+ tabDMLScript(dmlScript, numTabs).append(\"update_nesterov = externalFunction(matrix[double] X, matrix[double] dX, double lr, double mu, matrix[double] v) return (matrix[double] X, matrix[double] v) implemented in (classname=\\\"org.apache.sysml.udf.lib.SGDNesterovUpdate\\\",exectype=\\\"mem\\\"); \\n\")\n+ }\n+\n// Read and convert to one-hote encoding\nassign(tabDMLScript, \"X_full\", \"read(\\\" \\\", format=\\\"csv\\\")\")\nassign(tabDMLScript, \"y_full\", \"read(\\\" \\\", format=\\\"csv\\\")\")\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/scala/org/apache/sysml/api/dl/CaffeSolver.scala",
"new_path": "src/main/scala/org/apache/sysml/api/dl/CaffeSolver.scala",
"diff": "@@ -145,10 +145,11 @@ class AdaGrad(lambda:Double=5e-04, epsilon:Double=1e-6) extends CaffeSolver {\nclass Nesterov(lambda:Double=5e-04, momentum:Double=0.9) extends CaffeSolver {\ndef update(dmlScript:StringBuilder, layer:CaffeLayer):Unit = {\nl2reg_update(lambda, dmlScript, layer)\n+ val fn = if(Caffe2DML.USE_NESTEROV_UDF) \"update_nesterov\" else \"sgd_nesterov::update\"\nif(layer.shouldUpdateWeight) dmlScript.append(\"\\t\").append(\"[\"+ commaSep(layer.weight, layer.weight+\"_v\") + \"] \" +\n- \"= sgd_nesterov::update(\" + commaSep(layer.weight, layer.dWeight, getWeightLr(layer), momentum.toString, layer.weight+\"_v\") + \")\\n\")\n+ \"= \" + fn + \"(\" + commaSep(layer.weight, layer.dWeight, getWeightLr(layer), momentum.toString, layer.weight+\"_v\") + \")\\n\")\nif(layer.shouldUpdateBias) dmlScript.append(\"\\t\").append(\"[\"+ commaSep(layer.bias, layer.bias+\"_v\") + \"] \" +\n- \"= sgd_nesterov::update(\" + commaSep(layer.bias, layer.dBias, getBiasLr(layer), momentum.toString, layer.bias+\"_v\") + \")\\n\")\n+ \"= \" + fn + \"(\" + commaSep(layer.bias, layer.dBias, getBiasLr(layer), momentum.toString, layer.bias+\"_v\") + \")\\n\")\n}\ndef init(dmlScript:StringBuilder, layer:CaffeLayer):Unit = {\nif(layer.shouldUpdateWeight) dmlScript.append(layer.weight+\"_v = sgd_nesterov::init(\" + layer.weight + \")\\n\")\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Added SGDNesterovUpdate UDF support to Caffe2DML
This will allow us to extrapolate the performance gains using codegen.
The UDF is disabled by default and is enabled only after setting `export
USE_NESTEROV_UDF="true"`. We will remove this flag and also the class
org.apache.sysml.udf.lib.SGDNesterovUpdate after codegen is stable. |
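The UDF's dense fast path implements the two formulas spelled out in the diff: v_new = mu*v - lr*dX and X_new = X - mu*v_prev + (1 + mu)*v_new. The following is a minimal sketch of that arithmetic on plain double arrays; the real UDF additionally tracks non-zero counts and handles sparse inputs, and all names here are illustrative:

```java
import java.util.Arrays;

// Hedged sketch of the dense SGD-Nesterov update from the diff above:
//   v_new = mu * v - lr * dX
//   X_new = X - mu * v_prev + (1 + mu) * v_new
public class NesterovUpdateSketch {
    static void update(double[] x, double[] dX, double[] v, double lr, double mu) {
        double muPlus1 = mu + 1;
        for (int i = 0; i < x.length; i++) {
            double vPrev = v[i];
            v[i] = mu * vPrev - lr * dX[i];        // velocity update
            x[i] += -mu * vPrev + muPlus1 * v[i];  // parameter update
        }
    }

    public static void main(String[] args) {
        double[] x = {1.0, -2.0}, dX = {0.5, -0.25}, v = {0.0, 0.0};
        update(x, dX, v, /*lr*/ 0.01, /*mu*/ 0.9);
        System.out.println(Arrays.toString(x) + " " + Arrays.toString(v));
    }
}
```

Fusing both formulas into a single loop avoids the elementwise intermediates the generic runtime would otherwise materialize, which is the gap the commit says codegen is ultimately meant to close.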
49,738 | 07.05.2017 15:25:44 | 25,200 | 576eb4e96a3aabacfd067be1ce5a644b83ce422e | [MINOR] Fix flaky matrix converter tests (statically allocated sync points) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/io/MatrixReader.java",
"new_path": "src/main/java/org/apache/sysml/runtime/io/MatrixReader.java",
"diff": "@@ -116,8 +116,10 @@ public abstract class MatrixReader\n//create synchronization points for MCSR (start row per block row)\nif( sblock instanceof SparseBlockMCSR && clen > bclen //multiple col blocks\n&& clen > 0 && bclen > 0 && rlen > 0 && brlen > 0 ) { //all dims known\n+ //note: allocate w/ min 2 nnz to ensure allocated row object because\n+ //adaptive change from scalar to row could cause synchronization issues\nfor( int i=0; i<rlen; i+=brlen )\n- sblock.allocate(i, Math.max((int)(estnnz/rlen),1), (int)clen);\n+ sblock.allocate(i, Math.max((int)(estnnz/rlen),2), (int)clen);\n}\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix flaky matrix converter tests (statically allocated sync points) |
49,736 | 07.05.2017 19:34:12 | 28,800 | 6863632088c8d0b548a17413692b399d512a991d | Check for the empty block case
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java",
"new_path": "src/main/java/org/apache/sysml/runtime/instructions/cp/ConvolutionCPInstruction.java",
"diff": "@@ -226,7 +226,7 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction\nMatrixBlock outputBlock = new MatrixBlock(input.getNumRows(), input.getNumColumns(),\nLibMatrixDNN.SUPPORTS_SPARSE_OUTPUTS && (input.isInSparseFormat() || dout.isInSparseFormat()));\n- if( !input.isEmptyBlock() && !dout.isEmptyBlock() ) {\n+ if( !input.isEmpty() && !dout.isEmpty() ) {\noutputBlock.allocateDenseOrSparseBlock();\nLibMatrixDNN.reluBackward(input, dout, outputBlock, _numThreads);\n}\n@@ -246,10 +246,10 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction\nthrow new DMLRuntimeException(\"Expected the number of columns of bias matrix to be 1, but found \" + bias.getNumColumns());\n}\n- if(input.isEmptyBlock() && bias.isEmptyBlock()) {\n+ if(input.isEmpty() && bias.isEmpty()) {\noutputBlock = new MatrixBlock(input.getNumRows(), input.getNumColumns(), true);\n}\n- else if(bias.isEmptyBlock()) {\n+ else if(bias.isEmpty()) {\noutputBlock = new MatrixBlock(input);\n}\nelse {\n@@ -274,7 +274,7 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction\nthrow new DMLRuntimeException(\"Expected the number of columns of bias matrix to be 1, but found \" + bias.getNumColumns());\n}\n- if(bias.isEmptyBlock()) {\n+ if(bias.isEmpty()) {\n// Anything multiplied by zero is zero\noutputBlock = new MatrixBlock(input.getNumRows(), input.getNumColumns(), true);\n}\n@@ -342,7 +342,7 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction\nConvolutionParameters params = new ConvolutionParameters(N, C, H, W, K, R, S, stride_h, stride_w, pad_h, pad_w, _numThreads);\nparams.enableNative = NativeHelper.isNativeLibraryLoaded();\nif (instOpcode.equalsIgnoreCase(\"maxpooling\") || instOpcode.equalsIgnoreCase(\"relu_maxpooling\")) {\n- if(matBlock.isEmptyBlock()) {\n+ if(matBlock.isEmpty()) {\noutputBlock = new MatrixBlock(N, C*P*Q, true);\n}\nelse {\n@@ -354,7 +354,7 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction\n}\nelse if (instOpcode.equalsIgnoreCase(\"maxpooling_backward\") || instOpcode.equalsIgnoreCase(\"relu_maxpooling_backward\")) {\nMatrixBlock dout = ec.getMatrixInput(_in2.getName());\n- if(matBlock.isEmptyBlock() || dout.isEmptyBlock()) {\n+ if(matBlock.isEmpty() || dout.isEmpty()) {\noutputBlock = new MatrixBlock(N, C*H*W, true);\n}\nelse {\n@@ -368,7 +368,7 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction\n}\nelse if (instOpcode.equalsIgnoreCase(\"conv2d\")) {\nMatrixBlock filter = ec.getMatrixInput(_in2.getName());\n- if(filter.isEmptyBlock() || matBlock.isEmptyBlock()) {\n+ if(filter.isEmpty() || matBlock.isEmpty()) {\noutputBlock = new MatrixBlock(N, K*P*Q, true);\n}\nelse {\n@@ -383,12 +383,12 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction\nelse if (instOpcode.equalsIgnoreCase(\"conv2d_bias_add\")) {\nMatrixBlock filter = ec.getMatrixInput(_in3.getName());\nMatrixBlock bias = ec.getMatrixInput(_in2.getName());\n- if((filter.isEmptyBlock() || matBlock.isEmptyBlock()) && bias.isEmptyBlock()) {\n+ if((filter.isEmpty() || matBlock.isEmpty()) && bias.isEmpty()) {\noutputBlock = new MatrixBlock(N, K*P*Q, true);\n}\nelse {\noutputBlock = getDenseOutputBlock(N, K*P*Q);\n- if(!bias.isEmptyBlock()) {\n+ if(!bias.isEmpty()) {\nparams.bias = bias;\n}\nif(params.enableNative && !isFilterSparse(filter) && !matBlock.isInSparseFormat())\n@@ -401,7 +401,7 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction\n}\nelse if (instOpcode.equalsIgnoreCase(\"conv2d_backward_filter\")) {\nMatrixBlock dout = 
ec.getMatrixInput(_in2.getName());\n- if(dout.isEmptyBlock() || matBlock.isEmptyBlock()) {\n+ if(dout.isEmpty() || matBlock.isEmpty()) {\noutputBlock = new MatrixBlock(K, C*R*S, true);\n}\nelse {\n@@ -415,7 +415,7 @@ public class ConvolutionCPInstruction extends UnaryCPInstruction\n}\nelse if (instOpcode.equalsIgnoreCase(\"conv2d_backward_data\")) {\nMatrixBlock dout = ec.getMatrixInput(_in2.getName());\n- if(dout.isEmptyBlock() || matBlock.isEmptyBlock()) {\n+ if(dout.isEmpty() || matBlock.isEmpty()) {\noutputBlock = new MatrixBlock(N, C * H * W, true);\n}\nelse {\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixDNN.java",
"diff": "@@ -1138,7 +1138,7 @@ public class LibMatrixDNN {\nelse {\n// In all other cases, perform im2col in Java + matmult (either native or java).\nMatrixBlock im2ColOutBlock = _im2ColOutBlocks.remove();\n- double [] temp = _params.input1.isInSparseFormat() ? new double[_params.input1.getNumColumns()] : null;\n+ double [] temp = (_params.input1.isInSparseFormat() || _params.input1.denseBlock == null) ? new double[_params.input1.getNumColumns()] : null;\nfor(int n = _rl; n < _ru; n++)\ndoLoopedIm2ColConv2d(n, im2ColOutBlock, _params, temp);\n_im2ColOutBlocks.add(im2ColOutBlock);\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1589] Check for the empty block case
Closes #487. |
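The recurring pattern in this change is a sparsity short-circuit: if an input block carries no non-zeros, the operator allocates an empty (sparse) output block instead of running the kernel. A schematic sketch of the guard follows, using a toy block type since the real code operates on SystemML's MatrixBlock:

```java
// Schematic sketch of the empty-input short-circuit, e.g. for conv2d:
// an all-zero image or filter implies an all-zero output, so skip the kernel.
public class EmptyBlockGuardSketch {
    static class Block {
        final int rows, cols;
        final double[] data; // null stands for an empty (all-zero) block
        Block(int rows, int cols, double[] data) {
            this.rows = rows; this.cols = cols; this.data = data;
        }
        boolean isEmpty() { return data == null; }
    }

    static Block conv2dLike(Block input, Block filter, int outRows, int outCols) {
        if (input.isEmpty() || filter.isEmpty())
            return new Block(outRows, outCols, null); // empty output, kernel skipped
        double[] out = new double[outRows * outCols];
        // ... the actual convolution kernel would fill 'out' here ...
        return new Block(outRows, outCols, out);
    }

    public static void main(String[] args) {
        Block emptyImage = new Block(2, 4, null);
        Block filter = new Block(1, 4, new double[]{1, 0, 0, 1});
        System.out.println(conv2dLike(emptyImage, filter, 2, 1).isEmpty()); // true
    }
}
```

Note the operator-specific twist visible in the diff: for bias_add an empty bias leaves the input unchanged, whereas for bias_multiply an empty bias zeroes the output, so the guards differ per operator.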
49,738 | 07.05.2017 20:49:38 | 25,200 | 19e21744c86adbedf6098906808c2c6327659cfe | Fix codegen handling of unsupported row aggregates | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java",
"diff": "@@ -78,7 +78,8 @@ public class TemplateRow extends TemplateBase\n|| (hop instanceof AggBinaryOp && hop.getDim2()==1\n&& hop.getInput().get(0).getDim1()>1 && hop.getInput().get(0).getDim2()>1)\n|| (hop instanceof AggUnaryOp && ((AggUnaryOp)hop).getDirection()!=Direction.RowCol\n- && hop.getInput().get(0).getDim1()>1 && hop.getInput().get(0).getDim2()>1);\n+ && hop.getInput().get(0).getDim1()>1 && hop.getInput().get(0).getDim2()>1\n+ && HopRewriteUtils.isAggUnaryOp(hop, SUPPORTED_ROW_AGG));\n}\n@Override\n@@ -89,7 +90,8 @@ public class TemplateRow extends TemplateBase\n|| HopRewriteUtils.isBinaryMatrixScalarOperation(hop)) )\n|| ((hop instanceof UnaryOp || hop instanceof ParameterizedBuiltinOp)\n&& TemplateCell.isValidOperation(hop))\n- || (hop instanceof AggUnaryOp && ((AggUnaryOp)hop).getDirection()!=Direction.RowCol)\n+ || (hop instanceof AggUnaryOp && ((AggUnaryOp)hop).getDirection()!=Direction.RowCol\n+ && HopRewriteUtils.isAggUnaryOp(hop, SUPPORTED_ROW_AGG))\n|| (hop instanceof AggBinaryOp && hop.getDim1()>1\n&& HopRewriteUtils.isTransposeOperation(hop.getInput().get(0))));\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowAggTmplTest.java",
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/RowAggTmplTest.java",
"diff": "@@ -51,6 +51,7 @@ public class RowAggTmplTest extends AutomatedTestBase\nprivate static final String TEST_NAME13 = TEST_NAME+\"13\"; //rowSums(X)+rowSums(Y)\nprivate static final String TEST_NAME14 = TEST_NAME+\"14\"; //colSums(max(floor(round(abs(min(sign(X+Y),1)))),7))\nprivate static final String TEST_NAME15 = TEST_NAME+\"15\"; //systemml nn - softmax backward (partially)\n+ private static final String TEST_NAME16 = TEST_NAME+\"16\"; //Y=X-rowIndexMax(X); R=Y/rowSums(Y)\nprivate static final String TEST_DIR = \"functions/codegen/\";\nprivate static final String TEST_CLASS_DIR = TEST_DIR + RowAggTmplTest.class.getSimpleName() + \"/\";\n@@ -62,7 +63,7 @@ public class RowAggTmplTest extends AutomatedTestBase\n@Override\npublic void setUp() {\nTestUtils.clearAssertionInformation();\n- for(int i=1; i<=15; i++)\n+ for(int i=1; i<=16; i++)\naddTestConfiguration( TEST_NAME+i, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME+i, new String[] { String.valueOf(i) }) );\n}\n@@ -291,6 +292,21 @@ public class RowAggTmplTest extends AutomatedTestBase\ntestCodegenIntegration( TEST_NAME15, false, ExecType.SPARK );\n}\n+ @Test\n+ public void testCodegenRowAggRewrite16CP() {\n+ testCodegenIntegration( TEST_NAME16, true, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenRowAgg16CP() {\n+ testCodegenIntegration( TEST_NAME16, false, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCodegenRowAgg16SP() {\n+ testCodegenIntegration( TEST_NAME16, false, ExecType.SPARK );\n+ }\n+\nprivate void testCodegenIntegration( String testname, boolean rewrites, ExecType instType )\n{\nboolean oldFlag = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/rowAggPattern16.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args<-commandArgs(TRUE)\n+options(digits=22)\n+library(\"Matrix\")\n+library(\"matrixStats\")\n+\n+\n+X = matrix(seq(1,1500), 150, 10, byrow=TRUE);\n+\n+Y1 = X - max.col(X, ties.method=\"last\")\n+R = Y1 / rowSums(Y1)\n+\n+writeMM(as(R, \"CsparseMatrix\"), paste(args[2], \"S\", sep=\"\"));\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/rowAggPattern16.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = matrix(seq(1,1500), rows=150, cols=10);\n+\n+Y1 = X - rowIndexMax(X)\n+R = Y1 / rowSums(Y1)\n+\n+write(R, $1)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1590] Fix codegen handling of unsupported row aggregates |
49,736 | 07.05.2017 20:17:21 | 28,800 | 6c215e700c1855074228972f952663663f6eabaa | Incorporate ALLOW_OPERATOR_FUSION in ConvolutionOp for developer testing
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java",
"new_path": "src/main/java/org/apache/sysml/hops/ConvolutionOp.java",
"diff": "@@ -153,15 +153,15 @@ public class ConvolutionOp extends Hop implements MultiThreadedHop\n// RELU_MAX_POOLING and RELU_MAX_POOLING_BACKWARD is extremely useful for CP backend\n// by reducing unnecessary sparse-to-dense-to-sparse conversion.\n// For other backends, this operators is not necessary as it reduces an additional relu operator.\n- if(et == ExecType.CP && op == ConvOp.MAX_POOLING && isInputReLU(inputs.get(0))) {\n+ if(OptimizerUtils.ALLOW_OPERATOR_FUSION && et == ExecType.CP && op == ConvOp.MAX_POOLING && isInputReLU(inputs.get(0))) {\nin = inputs.get(0).getInput().get(0).constructLops();\nlopOp = OperationTypes.RELU_MAX_POOLING;\n}\n- else if(et == ExecType.CP && op == ConvOp.MAX_POOLING_BACKWARD && isInputReLU(inputs.get(0))) {\n+ else if(OptimizerUtils.ALLOW_OPERATOR_FUSION && et == ExecType.CP && op == ConvOp.MAX_POOLING_BACKWARD && isInputReLU(inputs.get(0))) {\nin = inputs.get(0).getInput().get(0).constructLops();\nlopOp = OperationTypes.RELU_MAX_POOLING_BACKWARD;\n}\n- else if(op == ConvOp.BIAS_ADD && isInputConv2d(inputs.get(0))) {\n+ else if(OptimizerUtils.ALLOW_OPERATOR_FUSION && op == ConvOp.BIAS_ADD && isInputConv2d(inputs.get(0))) {\nlopOp = OperationTypes.DIRECT_CONV2D_BIAS_ADD;\n// the first lop is image\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1573] Incorporate ALLOW_OPERATOR_FUSION in ConvolutionOp for developer testing
Closes #482. |
49,736 | 09.05.2017 22:30:32 | 25,200 | 755121f93d036df0c182edbf9a41640993f75535 | [MINOR] Included nn test dml files into the jar
This change makes it easier to test in different environments to ensure
that everything is running correctly. | [
{
"change_type": "MODIFY",
"old_path": "pom.xml",
"new_path": "pom.xml",
"diff": "<exclude>perftest</exclude>\n<exclude>staging/**/*</exclude>\n<exclude>staging</exclude>\n- <exclude>nn/test/**/*</exclude>\n+ <exclude>nn/test/compare_backends/*</exclude>\n<!-- <exclude>*.sh</exclude> --> <!-- applies to sparkDML.sh -->\n</excludes>\n<targetPath>scripts</targetPath>\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Included nn test dml files into the jar
This change makes it easier to test in different environments to ensure
that everything is running correctly. |
49,766 | 11.05.2017 06:20:06 | 25,200 | 8ad35c5b1a6958862dfab3d5f6232988ce57f36e | Update license to remove reference to node_modules
node_modules is just a temporary build folder generated
by the website/documentation build and is not shipped with
SystemML. | [
{
"change_type": "MODIFY",
"old_path": "src/assembly/source/LICENSE",
"new_path": "src/assembly/source/LICENSE",
"diff": "@@ -372,12 +372,3 @@ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n-\n-====\n-\n-All files located in the node_modules and external directories are\n-externally maintained libraries used by this software which have their\n-own licenses; we recommend you read them, as their terms may differ from\n-the terms above.\n-\n-\n\\ No newline at end of file\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1601] Update license to remove reference to node_modules
node_modules is just a temporary build folder generated
by the website/documentation build and is not shipped with
SystemML. |
49,766 | 11.05.2017 06:33:01 | 25,200 | 88e79bb341ea099e932cd85ace75389a5cb53ca7 | Fix Sizzle license
It seems that the sizzle.js license got split when the
pygments-default.css license was added. | [
{
"change_type": "MODIFY",
"old_path": "src/assembly/source/LICENSE",
"new_path": "src/assembly/source/LICENSE",
"diff": "@@ -322,6 +322,28 @@ available at https://github.com/jquery/sizzle\nThe following license applies to all parts of this software except as\ndocumented below:\n+===\n+\n+Permission is hereby granted, free of charge, to any person obtaining\n+a copy of this software and associated documentation files (the\n+\"Software\"), to deal in the Software without restriction, including\n+without limitation the rights to use, copy, modify, merge, publish,\n+distribute, sublicense, and/or sell copies of the Software, and to\n+permit persons to whom the Software is furnished to do so, subject to\n+the following conditions:\n+\n+The above copyright notice and this permission notice shall be\n+included in all copies or substantial portions of the Software.\n+\n+THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n+\n+\n================================================================================\nPygments (pygments-default.css) is distributed under the BSD license:\n@@ -351,24 +373,3 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n-\n-====\n-\n-Permission is hereby granted, free of charge, to any person obtaining\n-a copy of this software and associated documentation files (the\n-\"Software\"), to deal in the Software without restriction, including\n-without limitation the rights to use, copy, modify, merge, publish,\n-distribute, sublicense, and/or sell copies of the Software, and to\n-permit persons to whom the Software is furnished to do so, subject to\n-the following conditions:\n-\n-The above copyright notice and this permission notice shall be\n-included in all copies or substantial portions of the Software.\n-\n-THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1601] Fix Sizzle license
It seems that the sizzle.js license got split when the
pygments-default.css license was added. |
49,768 | 12.05.2017 10:01:14 | 25,200 | 6adcb369d3b9472cfe560c3dd7fef3be2a527bc5 | Add Linear Regression Notebook example | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "samples/jupyter-notebooks/Linear_Regression_Algorithms_Demo.ipynb",
"diff": "+{\n+ \"cells\": [\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"This notebook shows:\\n\",\n+ \"- Install SystemML Python package and jar file\\n\",\n+ \" - pip\\n\",\n+ \" - SystemML 'Hello World'\\n\",\n+ \"- Example 1: Matrix Multiplication\\n\",\n+ \" - SystemML script to generate a random matrix, perform matrix multiplication, and compute the sum of the output\\n\",\n+ \" - Examine execution plans, and increase data size to obverve changed execution plans\\n\",\n+ \"- Load diabetes dataset from scikit-learn\\n\",\n+ \"- Example 2: Implement three different algorithms to train linear regression model\\n\",\n+ \" - Algorithm 1: Linear Regression - Direct Solve (no regularization)\\n\",\n+ \" - Algorithm 2: Linear Regression - Batch Gradient Descent (no regularization)\\n\",\n+ \" - Algorithm 3: Linear Regression - Conjugate Gradient (no regularization)\\n\",\n+ \"- Example 3: Invoke existing SystemML algorithm script LinearRegDS.dml using MLContext API\\n\",\n+ \"- Example 4: Invoke existing SystemML algorithm using scikit-learn/SparkML pipeline like API\\n\",\n+ \"- Uninstall/Clean up SystemML Python package and jar file\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Install SystemML Python package and jar file\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"scrolled\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"!pip install ~/git/incubator-systemml/target/systemml-1.0.0-incubating-SNAPSHOT-python.tgz\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"scrolled\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"!pip show systemml\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### Import SystemML API \"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"from systemml import MLContext, dml, dmlFromResource\\n\",\n+ \"\\n\",\n+ \"ml = MLContext(sc)\\n\",\n+ \"\\n\",\n+ \"print \\\"Spark Version:\\\", sc.version\\n\",\n+ \"print \\\"SystemML Version:\\\", ml.version()\\n\",\n+ \"print \\\"SystemML Built-Time:\\\", ml.buildTime()\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"ml.execute(dml(\\\"\\\"\\\"s = 'Hello World!'\\\"\\\"\\\").output(\\\"s\\\")).get(\\\"s\\\")\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### Import numpy, sklearn, and define some helper functions\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"import sys, os, glob, subprocess\\n\",\n+ \"import matplotlib.pyplot as plt\\n\",\n+ \"import numpy as np\\n\",\n+ \"from sklearn import datasets\\n\",\n+ \"plt.switch_backend('agg')\\n\",\n+ \" \\n\",\n+ \"def printLastLogLines(n):\\n\",\n+ \" fname = max(glob.iglob(os.sep.join([os.environ[\\\"HOME\\\"],'/logs/notebook/kernel-pyspark-*.log'])), key=os.path.getctime)\\n\",\n+ \" print(subprocess.check_output(['tail', '-' + str(n), fname]))\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"scrolled\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"import sys, os\\n\",\n+ \"SCRIPTS = os.sep.join([os.environ[\\\"HOME\\\"],'anaconda', 
'lib', 'python' + sys.version[:3], 'site-packages', 'systemml', 'systemml-java', 'scripts'])\\n\",\n+ \"print SCRIPTS\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Example 1: Matrix Multiplication\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### SystemML script to generate a random matrix, perform matrix multiplication, and compute the sum of the output\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true,\n+ \"slideshow\": {\n+ \"slide_type\": \"-\"\n+ }\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"script = \\\"\\\"\\\"\\n\",\n+ \" X = rand(rows=$nr, cols=1000, sparsity=0.5)\\n\",\n+ \" A = t(X) %*% X\\n\",\n+ \" s = sum(A)\\n\",\n+ \"\\\"\\\"\\\"\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"prog = dml(script).input('$nr', 1e5).output('s')\\n\",\n+ \"s = ml.execute(prog).get('s')\\n\",\n+ \"print s\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### Examine execution plans, and increase data size to observe changed execution plans\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true,\n+ \"scrolled\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"ml = MLContext(sc)\\n\",\n+ \"ml = ml.setStatistics(True)\\n\",\n+ \"# re-execute ML program\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"printLastLogLines(22)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"prog = dml(script).input('$nr', 1e6).output('s')\\n\",\n+ \"out = ml.execute(prog).get('s')\\n\",\n+ \"print out\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"ml = MLContext(sc)\\n\",\n+ \"ml = ml.setStatistics(False)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Load diabetes dataset from scikit-learn \"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"%matplotlib inline\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"diabetes = datasets.load_diabetes()\\n\",\n+ \"diabetes_X = diabetes.data[:, np.newaxis, 2]\\n\",\n+ \"diabetes_X_train = diabetes_X[:-20]\\n\",\n+ \"diabetes_X_test = diabetes_X[-20:]\\n\",\n+ \"diabetes_y_train = diabetes.target[:-20].reshape(-1,1)\\n\",\n+ \"diabetes_y_test = diabetes.target[-20:].reshape(-1,1)\\n\",\n+ \"\\n\",\n+ \"plt.scatter(diabetes_X_train, diabetes_y_train, color='black')\\n\",\n+ \"plt.scatter(diabetes_X_test, diabetes_y_test, color='red')\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"diabetes.data.shape\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Example 2: Implement three different algorithms to train linear regression model\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"source\": 
[\n+ \"## Algorithm 1: Linear Regression - Direct Solve (no regularization) \"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"#### Least squares formulation\\n\",\n+ \"w* = argminw ||Xw-y||2 = argminw (y - Xw)'(y - Xw) = argminw (w'(X'X)w - w'(X'y))/2\\n\",\n+ \"\\n\",\n+ \"#### Setting the gradient\\n\",\n+ \"dw = (X'X)w - (X'y) to 0, w = (X'X)-1(X' y) = solve(X'X, X'y)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"script = \\\"\\\"\\\"\\n\",\n+ \" # add constant feature to X to model intercept\\n\",\n+ \" X = cbind(X, matrix(1, rows=nrow(X), cols=1))\\n\",\n+ \" A = t(X) %*% X\\n\",\n+ \" b = t(X) %*% y\\n\",\n+ \" w = solve(A, b)\\n\",\n+ \" bias = as.scalar(w[nrow(w),1])\\n\",\n+ \" w = w[1:nrow(w)-1,]\\n\",\n+ \"\\\"\\\"\\\"\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true,\n+ \"scrolled\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"prog = dml(script).input(X=diabetes_X_train, y=diabetes_y_train).output('w', 'bias')\\n\",\n+ \"w, bias = ml.execute(prog).get('w','bias')\\n\",\n+ \"w = w.toNumPy()\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"scrolled\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"plt.scatter(diabetes_X_train, diabetes_y_train, color='black')\\n\",\n+ \"plt.scatter(diabetes_X_test, diabetes_y_test, color='red')\\n\",\n+ \"\\n\",\n+ \"plt.plot(diabetes_X_test, (w*diabetes_X_test)+bias, color='blue', linestyle ='dotted')\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"source\": [\n+ \"## Algorithm 2: Linear Regression - Batch Gradient Descent (no regularization)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"#### Algorithm\\n\",\n+ \"`Step 1: Start with an initial point \\n\",\n+ \"while(not converged) { \\n\",\n+ \" Step 2: Compute gradient dw. \\n\",\n+ \" Step 3: Compute stepsize alpha. 
\\n\",\n+ \" Step 4: Update: wnew = wold + alpha*dw \\n\",\n+ \"}`\\n\",\n+ \"\\n\",\n+ \"#### Gradient formula\\n\",\n+ \"`dw = r = (X'X)w - (X'y)`\\n\",\n+ \"\\n\",\n+ \"#### Step size formula\\n\",\n+ \"`Find number alpha to minimize f(w + alpha*r) \\n\",\n+ \"alpha = -(r'r)/(r'X'Xr)`\\n\",\n+ \"\\n\",\n+ \"\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"script = \\\"\\\"\\\"\\n\",\n+ \" # add constant feature to X to model intercepts\\n\",\n+ \" X = cbind(X, matrix(1, rows=nrow(X), cols=1))\\n\",\n+ \" max_iter = 100\\n\",\n+ \" w = matrix(0, rows=ncol(X), cols=1)\\n\",\n+ \" for(i in 1:max_iter){\\n\",\n+ \" XtX = t(X) %*% X\\n\",\n+ \" dw = XtX %*%w - t(X) %*% y\\n\",\n+ \" alpha = -(t(dw) %*% dw) / (t(dw) %*% XtX %*% dw)\\n\",\n+ \" w = w + dw*alpha\\n\",\n+ \" }\\n\",\n+ \" bias = as.scalar(w[nrow(w),1])\\n\",\n+ \" w = w[1:nrow(w)-1,] \\n\",\n+ \"\\\"\\\"\\\"\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"prog = dml(script).input(X=diabetes_X_train, y=diabetes_y_train).output('w').output('bias')\\n\",\n+ \"w, bias = ml.execute(prog).get('w', 'bias')\\n\",\n+ \"w = w.toNumPy()\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"scrolled\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"plt.scatter(diabetes_X_train, diabetes_y_train, color='black')\\n\",\n+ \"plt.scatter(diabetes_X_test, diabetes_y_test, color='red')\\n\",\n+ \"\\n\",\n+ \"plt.plot(diabetes_X_test, (w*diabetes_X_test)+bias, color='red', linestyle ='dashed')\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Algorithm 3: Linear Regression - Conjugate Gradient (no regularization)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"Problem with gradient descent: Takes very similar directions many times\\n\",\n+ \"\\n\",\n+ \"Solution: Enforce conjugacy\\n\",\n+ \"\\n\",\n+ \"`Step 1: Start with an initial point \\n\",\n+ \"while(not converged) {\\n\",\n+ \" Step 2: Compute gradient dw.\\n\",\n+ \" Step 3: Compute stepsize alpha.\\n\",\n+ \" Step 4: Compute next direction p by enforcing conjugacy with previous direction.\\n\",\n+ \" Step 4: Update: w_new = w_old + alpha*p\\n\",\n+ \"}`\\n\",\n+ \"\\n\",\n+ \"\\n\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"script = \\\"\\\"\\\"\\n\",\n+ \" # add constant feature to X to model intercepts\\n\",\n+ \" X = cbind(X, matrix(1, rows=nrow(X), cols=1))\\n\",\n+ \" m = ncol(X); i = 1; \\n\",\n+ \" max_iter = 20;\\n\",\n+ \" w = matrix (0, rows = m, cols = 1); # initialize weights to 0\\n\",\n+ \" dw = - t(X) %*% y; p = - dw; # dw = (X'X)w - (X'y)\\n\",\n+ \" norm_r2 = sum (dw ^ 2); \\n\",\n+ \" for(i in 1:max_iter) {\\n\",\n+ \" q = t(X) %*% (X %*% p)\\n\",\n+ \" alpha = norm_r2 / sum (p * q); # Minimizes f(w - alpha*r)\\n\",\n+ \" w = w + alpha * p; # update weights\\n\",\n+ \" dw = dw + alpha * q; \\n\",\n+ \" old_norm_r2 = norm_r2; norm_r2 = sum (dw ^ 2);\\n\",\n+ \" p = -dw + (norm_r2 / old_norm_r2) * p; # next direction - conjugacy to previous direction\\n\",\n+ \" i = i + 1;\\n\",\n+ \" }\\n\",\n+ \" bias = as.scalar(w[nrow(w),1])\\n\",\n+ \" w = w[1:nrow(w)-1,] \\n\",\n+ 
\"\\\"\\\"\\\"\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"prog = dml(script).input(X=diabetes_X_train, y=diabetes_y_train).output('w').output('bias')\\n\",\n+ \"w, bias = ml.execute(prog).get('w','bias')\\n\",\n+ \"w = w.toNumPy()\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"scrolled\": false\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"plt.scatter(diabetes_X_train, diabetes_y_train, color='black')\\n\",\n+ \"plt.scatter(diabetes_X_test, diabetes_y_test, color='red')\\n\",\n+ \"\\n\",\n+ \"plt.plot(diabetes_X_test, (w*diabetes_X_test)+bias, color='red', linestyle ='dashed')\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Example 3: Invoke existing SystemML algorithm script LinearRegDS.dml using MLContext API\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"prog = dml(\\\"/Users/user_name/git/incubator-systemml/scripts/algorithms/LinearRegDS.dml\\\").input(X=diabetes_X_train, y=diabetes_y_train).input('$icpt',1.0).output('beta_out')\\n\",\n+ \"w = ml.execute(prog).get('beta_out')\\n\",\n+ \"w = w.toNumPy()\\n\",\n+ \"bias=w[1]\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"plt.scatter(diabetes_X_train, diabetes_y_train, color='black')\\n\",\n+ \"plt.scatter(diabetes_X_test, diabetes_y_test, color='red')\\n\",\n+ \"\\n\",\n+ \"plt.plot(diabetes_X_test, (w[0]*diabetes_X_test)+bias, color='red', linestyle ='dashed')\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Example 4: Invoke existing SystemML algorithm using scikit-learn/SparkML pipeline like API\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"*mllearn* API allows a Python programmer to invoke SystemML's algorithms using scikit-learn like API as well as Spark's MLPipeline API.\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"from pyspark.sql import SQLContext\\n\",\n+ \"from systemml.mllearn import LinearRegression\\n\",\n+ \"sqlCtx = SQLContext(sc)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"regr = LinearRegression(sqlCtx)\\n\",\n+ \"# Train the model using the training sets\\n\",\n+ \"regr.fit(diabetes_X_train, diabetes_y_train)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n+ \"outputs\": [],\n+ \"source\": [\n+ \"predictions = regr.predict(diabetes_X_test)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"# Use the trained model to perform prediction\\n\",\n+ \"%matplotlib inline\\n\",\n+ \"plt.scatter(diabetes_X_train, diabetes_y_train, color='black')\\n\",\n+ \"plt.scatter(diabetes_X_test, diabetes_y_test, color='red')\\n\",\n+ \"\\n\",\n+ \"plt.plot(diabetes_X_test, predictions, color='black')\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"# Uninstall/Clean up SystemML Python package and jar 
file\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"!yes | pip uninstall systemml\"\n+ ]\n+ }\n+ ],\n+ \"metadata\": {\n+ \"kernelspec\": {\n+ \"display_name\": \"Python 2\",\n+ \"language\": \"python\",\n+ \"name\": \"python2\"\n+ },\n+ \"language_info\": {\n+ \"codemirror_mode\": {\n+ \"name\": \"ipython\",\n+ \"version\": 2\n+ },\n+ \"file_extension\": \".py\",\n+ \"mimetype\": \"text/x-python\",\n+ \"name\": \"python\",\n+ \"nbconvert_exporter\": \"python\",\n+ \"pygments_lexer\": \"ipython2\",\n+ \"version\": \"2.7.11\"\n+ }\n+ },\n+ \"nbformat\": 4,\n+ \"nbformat_minor\": 1\n+}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1607] Add Linear Regression Notebook example |
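For illustration, here is a minimal NumPy sketch of this notebook's Algorithm 1 (direct solve): append a constant feature for the intercept, then solve the normal equations, mirroring the DML `solve(t(X)%*%X, t(X)%*%y)` cell above. The helper name `linreg_direct_solve` is hypothetical (not a SystemML API), and dense float64 inputs are assumed.

    # Sketch of the notebook's direct-solve linear regression (Algorithm 1).
    # `linreg_direct_solve` is a hypothetical helper, not a SystemML API.
    import numpy as np

    def linreg_direct_solve(X, y):
        # Append a constant feature to model the intercept, then solve
        # the normal equations: w = solve(X'X, X'y).
        Xb = np.hstack([X, np.ones((X.shape[0], 1))])
        w = np.linalg.solve(Xb.T @ Xb, Xb.T @ y)
        return w[:-1], w[-1, 0]  # (weights, bias)

    rng = np.random.default_rng(0)
    X = rng.random((100, 3))
    y = X @ np.array([[1.0], [2.0], [3.0]]) + 0.5  # known weights and bias
    w, bias = linreg_direct_solve(X, y)
    print(w.ravel(), bias)  # recovers [1. 2. 3.] and 0.5

Since the data is noiseless, the recovered weights match the generating coefficients exactly; the gradient-descent and conjugate-gradient variants in the notebook converge to the same solution iteratively.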
49,767 | 12.05.2017 13:59:20 | 25,200 | d04d2381f369bc29c4c33e98381bcdc8a4d0aebb | Add Deconvolution layer in nn library.
Closes | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "scripts/nn/layers/conv2d_transpose.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+/*\n+ * 2D Transpose convolutional layer.\n+ *\n+ * Utilizes built-in convolution operators for higher performance.\n+ */\n+\n+forward = function(matrix[double] X, matrix[double] W, matrix[double] b,\n+ int C, int Hin, int Win, int Hf, int Wf,\n+ int strideh, int stridew, int padh, int padw,\n+ int out_padh, int out_padw)\n+ return (matrix[double] out, int Hout, int Wout){\n+ /*\n+ * Computes the forward pass for a 2D spatial transpose convolutional\n+ * layer with F filters. The input data has N examples, each\n+ * represented as a 3D tensor flattened into a single vector.\n+ *\n+ * Inputs:\n+ * - X: Inputs, of shape (N, C*Hin*Win).\n+ * - W: Weights, of shape (F, C*Hf*Wf).\n+ * - b: Biases, of shape (F, 1).\n+ * - C: Number of input channels (dimensionality of depth).\n+ * - Hin: Input height.\n+ * - Win: Input width.\n+ * - Hf: Filter height.\n+ * - Wf: Filter width.\n+ * - strideh: Stride over height.\n+ * - stridew: Stride over width.\n+ * - padh: Padding for top and bottom sides.\n+ * - padw: Padding for left and right sides.\n+ * - out_padh: extra padding for top side. This should\n+ * lie in [0, strideh-1].\n+ * - out_padw: extra padding for right side. This should\n+ * lie in [0, stridew-1].\n+ *\n+ * Outputs:\n+ * - out: Outputs, of shape (N, F*Hout*Wout).\n+ * - Hout: Output height.\n+ * - Wout: Output width.\n+ */\n+ N = nrow(X)\n+ F = nrow(W)\n+ Hout = strideh * (Hin-1) - 2*padh + Hf + out_padh\n+ Wout = stridew * (Win-1) - 2*padw + Wf + out_padw\n+\n+ /*\n+ * Transpose convolution aims to go in the other direction of\n+ * (direct) convolution, i.e., given input X, produce output O such\n+ * that running convolution on O recovers X. This is achieved by\n+ * conv2d_backward_data (since the derivative wrt data must produce\n+ * output of same size as the input to conv2d). By reusing a built-in\n+ * operator we achieve efficiency and restrict the number of built-in\n+ * operators to manageable levels. Plus, most other deep-learning\n+ * packages make use of the same strategy which means this\n+ * implementation of transpose convolution is 'in-sync' with them.\n+ *\n+ * One potential downside of reusing conv2d_backward_data is the fact\n+ * that it rotates the filter by 180 degrees before applying it. 
This\n+ * needs to be kept in mind when interpreting the output of transpose\n+ * convolution.\n+ */\n+ out = conv2d_backward_data(W, X, stride=[strideh,stridew], padding=[padh,padw],\n+ input_shape=[N,C,Hout,Wout], filter_shape=[F,C,Hf,Wf])\n+\n+ out = bias_add(out, b)\n+}\n+\n+backward = function(matrix[double] dout, int Hout, int Wout,\n+ matrix[double] X, matrix[double] W, matrix[double] b,\n+ int C, int Hin, int Win, int Hf, int Wf,\n+ int strideh, int stridew, int padh, int padw)\n+ return (matrix[double] dX, matrix[double] dW, matrix[double] db){\n+ /*\n+ * Computes the backward pass for a 2D spatial transpose\n+ * convolutional layer with F filters.\n+ *\n+ * Inputs:\n+ * - dout: Gradient wrt `out` from upstream, of\n+ * shape (N, F*Hout*Wout).\n+ * - Hout: Output height.\n+ * - Wout: Output width.\n+ * - X: Inputs, of shape (N, C*Hin*Win).\n+ * - W: Weights, of shape (F, C*Hf*Wf).\n+ * - b: Biases, of shape (F, 1).\n+ * - C: Number of input channels (dimensionality of depth).\n+ * - Hin: Input height.\n+ * - Win: Input width.\n+ * - Hf: Filter height.\n+ * - Wf: Filter width.\n+ * - strideh: Stride over height.\n+ * - stridew: Stride over width.\n+ * - padh: Padding for top and bottom sides.\n+ * - padw: Padding for left and right sides.\n+ *\n+ * Outputs:\n+ * - dX: Gradient wrt `X`, of shape (N, C*Hin*Win).\n+ * - dW: Gradient wrt `W`, of shape (F, C*Hf*Wf).\n+ * - db: Gradient wrt `b`, of shape (F, 1).\n+ */\n+ N = nrow(X)\n+ F = nrow(W)\n+\n+ /*\n+ * conv2d_backward_filter takes the input and delta map as first and\n+ * second args, respectively. Given that we need to compute the\n+ * grad (wrt the filter) for transpose convolution, where the roles of\n+ * the input and output are reversed, we reverse the order of the\n+ * args (along with setting input_shape to the delta map shape).\n+ * Effectively, we are running a direct convolution with X as the\n+ * filter and the dout as the input. To convince oneself that the\n+ * interconnections between the cells of the filter, input and delta\n+ * map are preserved, please keep in mind that the forward of\n+ * convolution transpose rotates the filter by 180 degrees before\n+ * applying it.\n+ */\n+ dW = conv2d_backward_filter(dout, X, stride=[strideh,stridew], padding=[padh,padw],\n+ input_shape=[N,C,Hout,Wout], filter_shape=[F,C,Hf,Wf])\n+\n+ /*\n+ * Since the forward for transpose convolution makes a call to\n+ * conv2d_backward_data, to compute its derivative wrt the data\n+ * we can run conv2d by applying the filter on the delta\n+ * map (this makes sense because convolution transpose is the\n+ * 'reverse' of convolution). It's easy to see that this will produce\n+ * output of the required size. 
To convince oneself that conv2d will\n+ * respect the interconnections between the cells in the delta map\n+ * and the filter, keep in mind that the forward function rotates the\n+ * filter by 180 degrees before applying it.\n+ */\n+ dX = conv2d(dout, W, input_shape=[N,C,Hout,Wout], filter_shape=[F,C,Hf,Wf],\n+ stride=[strideh,stridew], padding=[padh,padw])\n+\n+ db = rowSums(matrix(colSums(dout), rows=F, cols=Hout*Wout))\n+}\n+\n+init = function(int F, int C, int Hf, int Wf)\n+ return (matrix[double] W, matrix[double] b){\n+ /*\n+ * Utility function to initialize the parameters of this layer.\n+ *\n+ * We use the heuristic by He et al., which limits the magnification\n+ * of inputs/gradients during forward/backward passes by scaling\n+ * unit-Gaussian weights by a factor of sqrt(2/n), under the\n+ * assumption of relu neurons.\n+ * - http://arxiv.org/abs/1502.01852\n+ *\n+ * Inputs:\n+ * - F: Number of filters.\n+ * - C: Number of input channels (dimensionality of depth).\n+ * - Hf: Filter height.\n+ * - Wf: Filter width.\n+ *\n+ * Outputs:\n+ * - W: Weights, of shape (F, C*Hf*Wf).\n+ * - b: Biases, of shape (F, 1).\n+ */\n+ W = rand(rows=F, cols=C*Hf*Wf, pdf=\"normal\") * sqrt(2/(C*Hf*Wf))\n+ b = matrix(0, rows=F, cols=1)\n+}\n+\n+init_bilinear = function(int C, int K)\n+ return (matrix[double] W, matrix[double] b){\n+ /*\n+ * Utility function to upsample using this layer.\n+ *\n+ * Upsampling the input by factor f (each side) requires\n+ * channel-wise independent kernels of size K = 2f - f%2,\n+ * stride = f and pad = ceil((f-1)/2). The weights are set\n+ * via bilinear interpolation, bias is set to 0.\n+ *\n+ * Inputs:\n+ * - C: Number of input channels (dimensionality of depth).\n+ * - K: Kernel size (upsampling requires a square filter\n+ * of size K X K).\n+ *\n+ * Outputs:\n+ * - W: Weights, of shape (C, C*K*K).\n+ * - b: Biases, of shape (C, 1).\n+ */\n+ factor_up = ceil(K / 2)\n+ center = (2 * factor_up - factor_up %% 2 - 1) / 2 / factor_up\n+ vect = 1 - abs(seq(0, K-1) / factor_up - center)\n+ weights = matrix(vect %*% t(vect), rows=1, cols=K*K)\n+\n+ /*\n+ * To create a multi-channel channel-independent upsampling filter,\n+ * we need to intersperse the filter weights with 0s. For instance,\n+ * consider the case of 2X upsampling. In this case, K=4 and we have\n+ * K^2=16 weights to include into the 3D tensor representing the\n+ * filter which should look like the following (assuming 3 channels):\n+ *\n+ * <-16 weights-> <---------32 0s--------->\n+ * X X ...... X X 0 0 0 ............. 0 0 0\n+ * 0 .......... 0 X X .... X X 0 ...... 0 0\n+ * 0 0 0 ............... 0 0 0 X X .... X X\n+ *\n+ * To be clear, the second row should have 16 0s followed by 16\n+ * weights followed by 16 0s.\n+ *\n+ * To create the above filter, we take advantage of the fact that\n+ * between two sets of non-zero weights, there is always a sequence\n+ * of C*K*K 0s. In the above example, C*K^2 = 48 (e.g., 32 trailing\n+ * 0s in the first row and 16 leading 0s in the second row).\n+ *\n+ * Note that, in the special case of C=1 we do not need to\n+ * intersperse with 0s (no question of being channel-wise independent\n+ * since we have only 1 channel).\n+ */\n+ #if(C > 1){\n+ /*\n+ * Append C*K*K trailing 0s to the K*K kernel and replicate the\n+ * resulting row C times\n+ */\n+ repl_weights = matrix(1, rows=C, cols=1) %*% cbind(weights, matrix(0, rows=1, cols=C*K*K))\n+\n+ /*\n+ * The above operation added extra C*K*K trailing 0s in the last row\n+ * that we do not need. 
Thus, we need to:\n+ * 1) reshape the resulting matrix into a row\n+ * 2) 'Clip off' the last few 0s using indexing and reshape the\n+ * result into the expected filter shape ([C, C, K, K])\n+ */\n+ repl_weights_row = matrix(repl_weights, rows=1, cols=C*(C+1)*K^2)\n+ W = matrix(repl_weights_row[1,1:(C*K)^2], rows=C, cols=C*K^2)\n+ #}else W = weights\n+\n+ b = matrix(0, rows=C, cols=1)\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/test/grad_check.dml",
"new_path": "scripts/nn/test/grad_check.dml",
"diff": "@@ -27,6 +27,7 @@ source(\"nn/layers/batch_norm1d.dml\") as batch_norm1d\nsource(\"nn/layers/batch_norm2d.dml\") as batch_norm2d\nsource(\"nn/layers/conv2d.dml\") as conv2d\nsource(\"nn/layers/conv2d_builtin.dml\") as conv2d_builtin\n+source(\"nn/layers/conv2d_transpose.dml\") as conv2d_transpose\nsource(\"nn/layers/cross_entropy_loss.dml\") as cross_entropy_loss\nsource(\"nn/layers/dropout.dml\") as dropout\nsource(\"nn/layers/l1_loss.dml\") as l1_loss\n@@ -616,6 +617,114 @@ conv2d_simple = function() {\n}\n}\n+conv2d_transpose = function() {\n+ /*\n+ * Gradient check for the 2D convolution transpose layer.\n+ */\n+ print(\"Grad checking the 2D convolution transpose layer with L2 loss.\")\n+\n+ N = 2\n+ C = 2\n+ Hin = 3\n+ Win = 3\n+ F = 2\n+ Hf = 3\n+ Wf = 3\n+ stride = 2\n+ pad = 1\n+ out_pad = 1\n+\n+ X = rand(rows=N, cols=C*Hin*Win)\n+\n+ [W,b] = conv2d_transpose::init(F, C, Hf, Wf)\n+\n+ [out, Hout, Wout] = conv2d_transpose::forward(X, W, b, C, Hin, Win, Hf, Wf, stride, stride,\n+ pad, pad, out_pad, out_pad)\n+\n+ y = rand(rows=N, cols=F*Hout*Wout)\n+\n+ dout = l2_loss::backward(out,y)\n+\n+ [dX, dW, db] = conv2d_transpose::backward(dout, Hout, Wout, X, W, b, C, Hin, Win, Hf, Wf,\n+ stride, stride, pad, pad)\n+\n+ h = 1e-5\n+ print(\" - Grad checking X.\")\n+ for (i in 1:nrow(X)) {\n+ for (j in 1:ncol(X)) {\n+ old = as.scalar(X[i,j])\n+ X[i,j] = old - h\n+\n+ [outmh, Hout, Wout] = conv2d_transpose::forward(X, W, b, C, Hin, Win, Hf, Wf, stride, stride,\n+ pad, pad, out_pad, out_pad)\n+\n+ lossmh = l2_loss::forward(outmh, y)\n+\n+ X[i,j] = old + h\n+ [outph, Hout, Wout] = conv2d_transpose::forward(X, W, b, C, Hin, Win, Hf, Wf, stride, stride,\n+ pad, pad, out_pad, out_pad)\n+\n+ lossph = l2_loss::forward(outph, y)\n+\n+ X[i,j] = old\n+\n+ dX_num = (lossph-lossmh) / (2*h)\n+\n+ rel_error = test_util::check_rel_grad_error(as.scalar(dX[i,j]), dX_num, lossph, lossmh)\n+ }\n+ }\n+\n+ print(\" - Grad checking W.\")\n+ for (i in 1:nrow(W)) {\n+ for (j in 1:ncol(W)) {\n+ old = as.scalar(W[i,j])\n+ W[i,j] = old - h\n+\n+ [outmh, Hout, Wout] = conv2d_transpose::forward(X, W, b, C, Hin, Win, Hf, Wf, stride, stride,\n+ pad, pad, out_pad, out_pad)\n+\n+ lossmh = l2_loss::forward(outmh, y)\n+\n+ W[i,j] = old + h\n+ [outph, Hout, Wout] = conv2d_transpose::forward(X, W, b, C, Hin, Win, Hf, Wf, stride, stride,\n+ pad, pad, out_pad, out_pad)\n+\n+ lossph = l2_loss::forward(outph, y)\n+\n+ W[i,j] = old\n+\n+ dW_num = (lossph-lossmh) / (2*h)\n+\n+ rel_error = test_util::check_rel_grad_error(as.scalar(dW[i,j]), dW_num, lossph, lossmh)\n+ }\n+ }\n+\n+ print(\" - Grad checking b.\")\n+ for (i in 1:nrow(b)) {\n+ for (j in 1:ncol(b)) {\n+ old = as.scalar(b[i,j])\n+ b[i,j] = old - h\n+\n+ [outmh, Hout, Wout] = conv2d_transpose::forward(X, W, b, C, Hin, Win, Hf, Wf, stride, stride,\n+ pad, pad, out_pad, out_pad)\n+\n+ lossmh = l2_loss::forward(outmh, y)\n+\n+ b[i,j] = old + h\n+ [outph, Hout, Wout] = conv2d_transpose::forward(X, W, b, C, Hin, Win, Hf, Wf, stride, stride,\n+ pad, pad, out_pad, out_pad)\n+\n+ lossph = l2_loss::forward(outph, y)\n+\n+ b[i,j] = old\n+\n+ db_num = (lossph-lossmh) / (2*h)\n+\n+ rel_error = test_util::check_rel_grad_error(as.scalar(db[i,j]), db_num, lossph, lossmh)\n+ }\n+ }\n+}\n+\ncross_entropy_loss = function() {\n/*\n* Gradient check for the cross-entropy loss function.\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/test/run_tests.dml",
"new_path": "scripts/nn/test/run_tests.dml",
"diff": "@@ -45,6 +45,7 @@ grad_check::batch_norm2d()\ngrad_check::conv2d()\ngrad_check::conv2d_builtin()\ngrad_check::conv2d_simple()\n+grad_check::conv2d_transpose()\ngrad_check::dropout()\ngrad_check::lstm()\ngrad_check::max_pool2d()\n@@ -85,6 +86,7 @@ print(\"---\")\ntest::batch_norm1d()\ntest::batch_norm2d()\ntest::conv2d()\n+test::conv2d_transpose()\ntest::cross_entropy_loss()\ntest::im2col()\ntest::max_pool2d()\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/nn/test/test.dml",
"new_path": "scripts/nn/test/test.dml",
"diff": "@@ -26,6 +26,7 @@ source(\"nn/layers/batch_norm1d.dml\") as batch_norm1d\nsource(\"nn/layers/batch_norm2d.dml\") as batch_norm2d\nsource(\"nn/layers/conv2d.dml\") as conv2d\nsource(\"nn/layers/conv2d_builtin.dml\") as conv2d_builtin\n+source(\"nn/layers/conv2d_transpose.dml\") as conv2d_transpose\nsource(\"nn/layers/cross_entropy_loss.dml\") as cross_entropy_loss\nsource(\"nn/layers/max_pool2d.dml\") as max_pool2d\nsource(\"nn/layers/max_pool2d_builtin.dml\") as max_pool2d_builtin\n@@ -108,6 +109,42 @@ conv2d = function() {\n}\n}\n+conv2d_transpose = function() {\n+ /*\n+ * Test for the 2D convolution transpose function.\n+ */\n+ print(\"Testing the 2D convolution transpose function.\")\n+\n+ N = 1\n+ C = 1\n+ Hin = 2\n+ Win = 2\n+ F = 1\n+ Hf = 3\n+ Wf = 3\n+ stride = 1\n+ pad = 0\n+ out_pad = 0\n+\n+ X = matrix(seq(1,N*C*Hin*Win), rows=N, cols=C*Hin*Win)\n+ W = matrix(seq(1,F*C*Hf*Wf), rows=F, cols=C*Hf*Wf)\n+ b = matrix(0, rows=F, cols=1)\n+\n+ # Forward\n+ [out, Hout, Wout] =\n+ conv2d_transpose::forward(X, W, b, C, Hin, Win, Hf, Wf, stride, stride, pad, pad, out_pad, out_pad)\n+\n+ # Equivalency check\n+ target = matrix(\"1 4 7 6 7 23 33 24 19 53 63 42 21 52 59 36\", rows=N, cols=C*Hout*Wout)\n+\n+ for (i in 1:nrow(out)) {\n+ for(j in 1:ncol(out)) {\n+ rel_error = test_util::check_rel_error(as.scalar(out[1,i]),\n+ as.scalar(target[1,i]), 1e-3, 1e-4)\n+ }\n+ }\n+}\n+\ncross_entropy_loss = function() {\n/*\n* Test for the cross-entropy loss function.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1483] Add Deconvolution layer in nn library.
Closes #490. |
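The forward pass in this commit derives the output size as Hout = strideh*(Hin-1) - 2*padh + Hf + out_padh (and likewise for the width). A small Python sketch of that arithmetic, with a hypothetical helper name that is not part of the nn library, reproduces the shape used in the test.dml equivalency check:

    # Output-shape arithmetic of conv2d_transpose::forward as a plain
    # Python sketch; the function name is hypothetical, not a library API.
    def conv2d_transpose_out_shape(Hin, Win, Hf, Wf, strideh, stridew,
                                   padh, padw, out_padh=0, out_padw=0):
        # Mirrors the DML forward function's size formulas.
        Hout = strideh * (Hin - 1) - 2 * padh + Hf + out_padh
        Wout = stridew * (Win - 1) - 2 * padw + Wf + out_padw
        return Hout, Wout

    # The test above upsamples a 2x2 input with a 3x3 filter, stride 1,
    # and no padding; its 16-element target matrix is laid out as 4x4.
    print(conv2d_transpose_out_shape(2, 2, 3, 3, 1, 1, 0, 0))  # (4, 4)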
49,737 | 16.05.2017 10:59:11 | 25,200 | ecadf0fe8d1ae88b7b79c9fee99d6a58a847be77 | Display version in MLContext welcome message
Closes | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContextUtil.java",
"new_path": "src/main/java/org/apache/sysml/api/mlcontext/MLContextUtil.java",
"diff": "@@ -976,6 +976,14 @@ public final class MLContextUtil {\npublic static String welcomeMessage() {\nStringBuilder sb = new StringBuilder();\nsb.append(\"\\nWelcome to Apache SystemML!\\n\");\n+ try {\n+ ProjectInfo info = ProjectInfo.getProjectInfo();\n+ if (info.version() != null) {\n+ sb.append(\"Version \");\n+ sb.append(info.version());\n+ }\n+ } catch (MLContextException e) {\n+ }\nreturn sb.toString();\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1600] Display version in MLContext welcome message
Closes #502. |
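The same version information that this commit appends to the Scala welcome message is also reachable from the Python MLContext API used in the notebooks above. A short usage sketch, assuming a live SparkContext `sc` and an installed systemml package:

    # Query the version/build info surfaced by the welcome message
    # (assumes a running SparkContext `sc`).
    from systemml import MLContext

    ml = MLContext(sc)
    print("SystemML Version: %s" % ml.version())
    print("SystemML Built-Time: %s" % ml.buildTime())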
49,768 | 17.05.2017 10:09:51 | 25,200 | 0d553e38496560fff431808080b6566b295d7911 | Update notebook samples with latest code
Delayed "Flight Delay Prediction" notebook temporarily due to issues. | [
{
"change_type": "DELETE",
"old_path": "samples/jupyter-notebooks/Flight_Delay_Demo.ipynb",
"new_path": null,
"diff": "-{\"nbformat_minor\": 0, \"cells\": [{\"source\": \"# Flight Delay Prediction Demo Using SystemML\", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"source\": \"This notebook is based on datascientistworkbench.com's tutorial notebook for predicting flight delay.\", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"source\": \"## Loading SystemML \", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"source\": \"To use one of the released version, use \\\"%AddDeps org.apache.systemml systemml 0.9.0-incubating\\\". To use nightly build, \\\"%AddJar https://sparktc.ibmcloud.com/repo/latest/SystemML.jar\\\"\\n\\nOr you provide SystemML.jar and dependency through commandline when starting the notebook (for example: --packages com.databricks:spark-csv_2.10:1.4.0 --jars SystemML.jar)\", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"execution_count\": 1, \"cell_type\": \"code\", \"source\": \"%AddJar https://sparktc.ibmcloud.com/repo/latest/SystemML.jar\", \"outputs\": [{\"output_type\": \"stream\", \"name\": \"stdout\", \"text\": \"Using cached version of SystemML.jar\\n\"}], \"metadata\": {\"collapsed\": false, \"trusted\": true}}, {\"source\": \"Use Spark's CSV package for loading the CSV file\", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"execution_count\": 2, \"cell_type\": \"code\", \"source\": \"%AddDeps com.databricks spark-csv_2.10 1.4.0\", \"outputs\": [{\"output_type\": \"stream\", \"name\": \"stdout\", \"text\": \":: loading settings :: url = jar:file:/usr/local/spark-kernel/lib/ivy-2.4.0.jar!/org/apache/ivy/core/settings/ivysettings.xml\\n:: resolving dependencies :: com.ibm.spark#spark-kernel;working [not transitive]\\n\\tconfs: [default]\\n\\tfound com.databricks#spark-csv_2.10;1.4.0 in central\\ndownloading https://repo1.maven.org/maven2/com/databricks/spark-csv_2.10/1.4.0/spark-csv_2.10-1.4.0.jar ...\\n\\t[SUCCESSFUL ] com.databricks#spark-csv_2.10;1.4.0!spark-csv_2.10.jar (68ms)\\n:: resolution report :: resolve 642ms :: artifacts dl 72ms\\n\\t:: modules in use:\\n\\tcom.databricks#spark-csv_2.10;1.4.0 from central in [default]\\n\\t---------------------------------------------------------------------\\n\\t| | modules || artifacts |\\n\\t| conf | number| search|dwnlded|evicted|| number|dwnlded|\\n\\t---------------------------------------------------------------------\\n\\t| default | 1 | 1 | 1 | 0 || 1 | 1 |\\n\\t---------------------------------------------------------------------\\n:: retrieving :: com.ibm.spark#spark-kernel\\n\\tconfs: [default]\\n\\t1 artifacts copied, 0 already retrieved (153kB/9ms)\\n\"}], \"metadata\": {\"collapsed\": false, \"trusted\": true}}, {\"source\": \"## Import Data\", \"cell_type\": \"markdown\", \"metadata\": {\"collapsed\": true}}, {\"source\": \"Download the airline dataset from stat-computing.org if not already downloaded\", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"execution_count\": 3, \"cell_type\": \"code\", \"source\": \"import sys.process._\\nimport java.net.URL\\nimport java.io.File\\nval url = \\\"http://stat-computing.org/dataexpo/2009/2007.csv.bz2\\\"\\nval localFilePath = \\\"airline2007.csv.bz2\\\"\\nif(!new java.io.File(localFilePath).exists) {\\n new URL(url) #> new File(localFilePath) !!\\n}\", \"outputs\": [], \"metadata\": {\"collapsed\": false, \"trusted\": true}}, {\"source\": \"Load the dataset into DataFrame using Spark CSV package\", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"execution_count\": 4, \"cell_type\": \"code\", \"source\": \"import 
org.apache.spark.sql.SQLContext\\nimport org.apache.spark.storage.StorageLevel\\nval sqlContext = new SQLContext(sc)\\nval fmt = sqlContext.read.format(\\\"com.databricks.spark.csv\\\")\\nval opt = fmt.options(Map(\\\"header\\\"->\\\"true\\\", \\\"inferSchema\\\"->\\\"true\\\"))\\nval airline = opt.load(localFilePath).na.replace( \\\"*\\\", Map(\\\"NA\\\" -> \\\"0.0\\\") )\", \"outputs\": [], \"metadata\": {\"collapsed\": false, \"trusted\": true}}, {\"execution_count\": 5, \"cell_type\": \"code\", \"source\": \"airline.printSchema\", \"outputs\": [{\"output_type\": \"stream\", \"name\": \"stdout\", \"text\": \"root\\n |-- Year: integer (nullable = true)\\n |-- Month: integer (nullable = true)\\n |-- DayofMonth: integer (nullable = true)\\n |-- DayOfWeek: integer (nullable = true)\\n |-- DepTime: string (nullable = true)\\n |-- CRSDepTime: integer (nullable = true)\\n |-- ArrTime: string (nullable = true)\\n |-- CRSArrTime: integer (nullable = true)\\n |-- UniqueCarrier: string (nullable = true)\\n |-- FlightNum: integer (nullable = true)\\n |-- TailNum: string (nullable = true)\\n |-- ActualElapsedTime: string (nullable = true)\\n |-- CRSElapsedTime: string (nullable = true)\\n |-- AirTime: string (nullable = true)\\n |-- ArrDelay: string (nullable = true)\\n |-- DepDelay: string (nullable = true)\\n |-- Origin: string (nullable = true)\\n |-- Dest: string (nullable = true)\\n |-- Distance: integer (nullable = true)\\n |-- TaxiIn: integer (nullable = true)\\n |-- TaxiOut: integer (nullable = true)\\n |-- Cancelled: integer (nullable = true)\\n |-- CancellationCode: string (nullable = true)\\n |-- Diverted: integer (nullable = true)\\n |-- CarrierDelay: integer (nullable = true)\\n |-- WeatherDelay: integer (nullable = true)\\n |-- NASDelay: integer (nullable = true)\\n |-- SecurityDelay: integer (nullable = true)\\n |-- LateAircraftDelay: integer (nullable = true)\\n\\n\"}], \"metadata\": {\"collapsed\": false, \"trusted\": true}}, {\"source\": \"## Data Exploration\\nWhich airports have the most delays?\", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"execution_count\": 6, \"cell_type\": \"code\", \"source\": \"airline.registerTempTable(\\\"airline\\\")\\nsqlContext.sql(\\\"\\\"\\\"SELECT Origin, count(*) conFlight, avg(DepDelay) delay\\n FROM airline\\n GROUP BY Origin\\n ORDER BY delay DESC\\\"\\\"\\\").show\", \"outputs\": [{\"output_type\": \"stream\", \"name\": \"stdout\", \"text\": \"+------+---------+------------------+\\n|Origin|conFlight| delay|\\n+------+---------+------------------+\\n| PIR| 4| 45.5|\\n| ACK| 314|45.296178343949045|\\n| SOP| 195| 34.02051282051282|\\n| HHH| 997| 22.58776328986961|\\n| MCN| 992|22.496975806451612|\\n| AKN| 235|21.123404255319148|\\n| CEC| 1055|20.807582938388627|\\n| GNV| 1927| 20.69797612869746|\\n| EYW| 1052|20.224334600760457|\\n| ACY| 735|20.141496598639456|\\n| SPI| 1745|19.545558739255014|\\n| GST| 90|19.233333333333334|\\n| EWR| 154113|18.800853918877706|\\n| BRW| 726| 18.02754820936639|\\n| AGS| 2286|17.728346456692915|\\n| ORD| 375784|17.695756072637472|\\n| TRI| 1207| 17.63628831814416|\\n| SBN| 5128|17.505850234009362|\\n| FAY| 2185| 17.48970251716247|\\n| PHL| 104063|17.067776250924922|\\n+------+---------+------------------+\\nonly showing top 20 rows\\n\\n\"}], \"metadata\": {\"collapsed\": false, \"trusted\": true}}, {\"source\": \"## Modeling: Logistic Regression\\n\\nPredict departure delays of greater than 15 of flights from JFK\", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"execution_count\": 8, \"cell_type\": 
\"code\", \"source\": \"sqlContext.udf.register(\\\"checkDelay\\\", (depDelay:String) => try { if(depDelay.toDouble > 15) 1.0 else 2.0 } catch { case e:Exception => 1.0 })\\nval tempSmallAirlineData = sqlContext.sql(\\\"SELECT *, checkDelay(DepDelay) label FROM airline WHERE Origin = 'JFK'\\\").persist(StorageLevel.MEMORY_AND_DISK)\\nval popularDest = tempSmallAirlineData.select(\\\"Dest\\\").map(y => (y.get(0).toString, 1)).reduceByKey(_ + _).filter(_._2 > 1000).collect.toMap\\nsqlContext.udf.register(\\\"onlyUsePopularDest\\\", (x:String) => popularDest.contains(x))\\ntempSmallAirlineData.registerTempTable(\\\"tempAirline\\\")\\nval smallAirlineData = sqlContext.sql(\\\"SELECT * FROM tempAirline WHERE onlyUsePopularDest(Dest)\\\")\\n\\nval datasets = smallAirlineData.randomSplit(Array(0.7, 0.3))\\nval trainDataset = datasets(0).cache\\nval testDataset = datasets(1).cache\\ntrainDataset.count\\ntestDataset.count\", \"outputs\": [{\"execution_count\": 8, \"output_type\": \"execute_result\", \"data\": {\"text/plain\": \"34773\"}, \"metadata\": {}}], \"metadata\": {\"collapsed\": false, \"trusted\": true}}, {\"source\": \"### Feature selection\", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"source\": \"Encode the destination using one-hot encoding and include the columns Year, Month, DayofMonth, DayOfWeek, Distance\", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"execution_count\": 9, \"cell_type\": \"code\", \"source\": \"import org.apache.spark.ml.feature.{OneHotEncoder, StringIndexer, VectorAssembler}\\n\\nval indexer = new StringIndexer().setInputCol(\\\"Dest\\\").setOutputCol(\\\"DestIndex\\\") // .setHandleInvalid(\\\"skip\\\") // Only works on Spark 1.6 or later\\nval encoder = new OneHotEncoder().setInputCol(\\\"DestIndex\\\").setOutputCol(\\\"DestVec\\\")\\nval assembler = new VectorAssembler().setInputCols(Array(\\\"Year\\\",\\\"Month\\\",\\\"DayofMonth\\\",\\\"DayOfWeek\\\",\\\"Distance\\\",\\\"DestVec\\\")).setOutputCol(\\\"features\\\")\", \"outputs\": [], \"metadata\": {\"collapsed\": false, \"trusted\": true}}, {\"source\": \"### Build the model: Use SystemML's MLPipeline wrapper. \\n\\nThis wrapper invokes MultiLogReg.dml (for training) and GLM-predict.dml (for prediction). 
These DML algorithms are available at https://github.com/apache/incubator-systemml/tree/master/scripts/algorithms\", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"execution_count\": 10, \"cell_type\": \"code\", \"source\": \"import org.apache.spark.ml.Pipeline\\nimport org.apache.sysml.api.ml.LogisticRegression\\n\\nval lr = new LogisticRegression(\\\"log\\\", sc).setRegParam(1e-4).setTol(1e-2).setMaxInnerIter(0).setMaxOuterIter(100)\\n\\nval pipeline = new Pipeline().setStages(Array(indexer, encoder, assembler, lr))\\nval model = pipeline.fit(trainDataset)\", \"outputs\": [{\"output_type\": \"stream\", \"name\": \"stdout\", \"text\": \"BEGIN MULTINOMIAL LOGISTIC REGRESSION SCRIPT\\nReading X...\\nReading Y...\\n-- Initially: Objective = 56433.27085246851, Gradient Norm = 4.469119635504498E7, Trust Delta = 0.001024586722033724\\n-- Outer Iteration 1: Had 1 CG iterations\\n -- Obj.Reduction: Actual = 9262.13484840509, Predicted = 8912.05664442707 (A/P: 1.0393), Trust Delta = 4.1513539310828525E-4\\n -- New Objective = 47171.13600406342, Beta Change Norm = 3.9882828705797336E-4, Gradient Norm = 3491408.311614066\\n \\n-- Outer Iteration 2: Had 2 CG iterations\\n -- Obj.Reduction: Actual = 107.11137476684962, Predicted = 105.31921188128369 (A/P: 1.017), Trust Delta = 4.1513539310828525E-4\\n -- New Objective = 47064.02462929657, Beta Change Norm = 1.0302143846288746E-4, Gradient Norm = 84892.35372269012\\nTermination / Convergence condition satisfied.\\n\"}], \"metadata\": {\"scrolled\": true, \"collapsed\": false, \"trusted\": true}}, {\"source\": \"### Evaluate the model \\n\\nOutput RMS error on test data\", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"execution_count\": 11, \"cell_type\": \"code\", \"source\": \"val predictions = model.transform(testDataset.withColumnRenamed(\\\"label\\\", \\\"OriginalLabel\\\"))\\npredictions.select(\\\"prediction\\\", \\\"OriginalLabel\\\").show\\nsqlContext.udf.register(\\\"square\\\", (x:Double) => Math.pow(x, 2.0))\", \"outputs\": [{\"output_type\": \"stream\", \"name\": \"stdout\", \"text\": \"+----------+-------------+\\n|prediction|OriginalLabel|\\n+----------+-------------+\\n| 1.0| 2.0|\\n| 1.0| 1.0|\\n| 1.0| 2.0|\\n| 1.0| 2.0|\\n| 1.0| 2.0|\\n| 1.0| 2.0|\\n| 1.0| 2.0|\\n| 1.0| 2.0|\\n| 1.0| 1.0|\\n| 1.0| 2.0|\\n| 1.0| 1.0|\\n| 1.0| 2.0|\\n| 1.0| 2.0|\\n| 1.0| 2.0|\\n| 1.0| 1.0|\\n| 1.0| 2.0|\\n| 1.0| 2.0|\\n| 1.0| 1.0|\\n| 1.0| 1.0|\\n| 1.0| 1.0|\\n+----------+-------------+\\nonly showing top 20 rows\\n\\n\"}, {\"execution_count\": 11, \"output_type\": \"execute_result\", \"data\": {\"text/plain\": \"UserDefinedFunction(<function1>,DoubleType,List())\"}, \"metadata\": {}}], \"metadata\": {\"collapsed\": false, \"trusted\": true}}, {\"execution_count\": 12, \"cell_type\": \"code\", \"source\": \"predictions.registerTempTable(\\\"predictions\\\")\\nsqlContext.sql(\\\"SELECT sqrt(avg(square(OriginalLabel - prediction))) FROM predictions\\\").show\", \"outputs\": [{\"output_type\": \"stream\", \"name\": \"stdout\", \"text\": \"+------------------+\\n| _c0|\\n+------------------+\\n|0.8557362892866146|\\n+------------------+\\n\\n\"}], \"metadata\": {\"collapsed\": false, \"trusted\": true}}, {\"source\": \"### Perform k-fold cross-validation to tune the hyperparameters\\n\\nPerform cross-validation to tune the regularization parameter for Logistic regression.\", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"execution_count\": 13, \"cell_type\": \"code\", \"source\": \"import 
org.apache.spark.ml.evaluation.BinaryClassificationEvaluator\\nimport org.apache.spark.ml.tuning.{ParamGridBuilder, CrossValidator}\\n\\nval crossval = new CrossValidator().setEstimator(pipeline).setEvaluator(new BinaryClassificationEvaluator)\\nval paramGrid = new ParamGridBuilder().addGrid(lr.regParam, Array(0.1, 1e-3, 1e-6)).build()\\ncrossval.setEstimatorParamMaps(paramGrid)\\ncrossval.setNumFolds(2) // Setting k = 2\\nval cvmodel = crossval.fit(trainDataset)\", \"outputs\": [{\"output_type\": \"stream\", \"name\": \"stdout\", \"text\": \"BEGIN MULTINOMIAL LOGISTIC REGRESSION SCRIPT\\nReading X...\\nReading Y...\\n-- Initially: Objective = 28202.772482623055, Gradient Norm = 2.221087060254761E7, Trust Delta = 0.001024586722033724\\n-- Outer Iteration 1: Had 1 CG iterations\\n -- Obj.Reduction: Actual = 4576.927438869821, Predicted = 4405.651264293149 (A/P: 1.0389), Trust Delta = 4.127578309122139E-4\\n -- New Objective = 23625.845043753234, Beta Change Norm = 3.9671126297839183E-4, Gradient Norm = 1718538.331150294\\nTermination / Convergence condition satisfied.\\nBEGIN MULTINOMIAL LOGISTIC REGRESSION SCRIPT\\nReading X...\\nReading Y...\\n-- Initially: Objective = 28202.772482623055, Gradient Norm = 2.221087060254761E7, Trust Delta = 0.001024586722033724\\n-- Outer Iteration 1: Had 1 CG iterations\\n -- Obj.Reduction: Actual = 4576.927438878782, Predicted = 4405.651264300938 (A/P: 1.0389), Trust Delta = 4.127578309130283E-4\\n -- New Objective = 23625.845043744273, Beta Change Norm = 3.967112629790933E-4, Gradient Norm = 1718538.3311583179\\n \\n-- Outer Iteration 2: Had 2 CG iterations\\n -- Obj.Reduction: Actual = 52.06267761322306, Predicted = 51.207226997373795 (A/P: 1.0167), Trust Delta = 4.127578309130283E-4\\n -- New Objective = 23573.78236613105, Beta Change Norm = 1.0195505438829344E-4, Gradient Norm = 41072.985998067124\\n \\n-- Outer Iteration 3: Had 2 CG iterations\\n -- Obj.Reduction: Actual = 0.03776156834283029, Predicted = 0.037741389955733964 (A/P: 1.0005), Trust Delta = 4.127578309130283E-4\\n -- New Objective = 23573.744604562708, Beta Change Norm = 3.3257729178954336E-6, Gradient Norm = 3559.0088415221207\\nTermination / Convergence condition satisfied.\\nBEGIN MULTINOMIAL LOGISTIC REGRESSION SCRIPT\\nReading X...\\nReading Y...\\n-- Initially: Objective = 28202.772482623055, Gradient Norm = 2.221087060254761E7, Trust Delta = 0.001024586722033724\\n-- Outer Iteration 1: Had 1 CG iterations\\n -- Obj.Reduction: Actual = 4576.927438878873, Predicted = 4405.651264301018 (A/P: 1.0389), Trust Delta = 4.1275783091303654E-4\\n -- New Objective = 23625.845043744182, Beta Change Norm = 3.9671126297910036E-4, Gradient Norm = 1718538.331158408\\n \\n-- Outer Iteration 2: Had 2 CG iterations\\n -- Obj.Reduction: Actual = 52.062677613230335, Predicted = 51.20722699738286 (A/P: 1.0167), Trust Delta = 4.1275783091303654E-4\\n -- New Objective = 23573.782366130952, Beta Change Norm = 1.0195505438831547E-4, Gradient Norm = 41072.98599806662\\n \\n-- Outer Iteration 3: Had 2 CG iterations\\n -- Obj.Reduction: Actual = 0.03776156833919231, Predicted = 0.037741389955751575 (A/P: 1.0005), Trust Delta = 4.1275783091303654E-4\\n -- New Objective = 23573.744604562613, Beta Change Norm = 3.3257729178972746E-6, Gradient Norm = 3559.008841523661\\n \\n-- Outer Iteration 4: Had 3 CG iterations, trust bound REACHED\\n -- Obj.Reduction: Actual = 1.3742707646742929, Predicted = 1.374282851981874 (A/P: 1.0), Trust Delta = 0.0016510313236521462\\n -- New Objective = 23572.37033379794, Beta Change 
Norm = 4.1275783091303654E-4, Gradient Norm = 23218.782943544382\\n \\n-- Outer Iteration 5: Had 3 CG iterations, trust bound REACHED\\n -- Obj.Reduction: Actual = 5.475667862796399, Predicted = 5.475595423716493 (A/P: 1.0), Trust Delta = 0.006604125294608585\\n -- New Objective = 23566.894665935142, Beta Change Norm = 0.0016510313236521464, Gradient Norm = 3400.306136071355\\n \\n-- Outer Iteration 6: Had 3 CG iterations, trust bound REACHED\\n -- Obj.Reduction: Actual = 19.796611347293947, Predicted = 19.796668922654057 (A/P: 1.0), Trust Delta = 0.02641650117843434\\n -- New Objective = 23547.09805458785, Beta Change Norm = 0.006604125294608585, Gradient Norm = 12384.979229404262\\n \\n-- Outer Iteration 7: Had 3 CG iterations, trust bound REACHED\\n -- Obj.Reduction: Actual = 48.9038754012945, Predicted = 48.86479486479853 (A/P: 1.0008), Trust Delta = 0.039975464358656405\\n -- New Objective = 23498.194179186554, Beta Change Norm = 0.026416501178434335, Gradient Norm = 25887.667183269536\\n \\n-- Outer Iteration 8: Had 1 CG iterations\\n -- Obj.Reduction: Actual = 0.007870123248721939, Predicted = 0.007868226951946769 (A/P: 1.0002), Trust Delta = 0.039975464358656405\\n -- New Objective = 23498.186309063305, Beta Change Norm = 6.078745447586554E-7, Gradient Norm = 1345.8027775103888\\n \\n-- Outer Iteration 9: Had 5 CG iterations, trust bound REACHED\\n -- Obj.Reduction: Actual = 25.04238552428069, Predicted = 25.024767443519863 (A/P: 1.0007), Trust Delta = 0.0405590959281579\\n -- New Objective = 23473.143923539024, Beta Change Norm = 0.039975464358656405, Gradient Norm = 63769.52436782582\\n \\n-- Outer Iteration 10: Had 1 CG iterations\\n -- Obj.Reduction: Actual = 0.04773861860303441, Predicted = 0.04771039962536379 (A/P: 1.0006), Trust Delta = 0.0405590959281579\\n -- New Objective = 23473.09618492042, Beta Change Norm = 1.4963385754664812E-6, Gradient Norm = 720.8018323328566\\n \\n-- Outer Iteration 11: Had 5 CG iterations, trust bound REACHED\\n -- Obj.Reduction: Actual = 8.123822556943196, Predicted = 8.128868676639112 (A/P: 0.9994), Trust Delta = 0.10966765508915642\\n -- New Objective = 23464.972362363478, Beta Change Norm = 0.040559095928157894, Gradient Norm = 72691.91595482397\\n \\n-- Outer Iteration 12: Had 1 CG iterations\\n -- Obj.Reduction: Actual = 0.06196295309564448, Predicted = 0.061921093377362 (A/P: 1.0007), Trust Delta = 0.10966765508915642\\n -- New Objective = 23464.910399410383, Beta Change Norm = 1.7036583109418734E-6, Gradient Norm = 482.30416635512506\\n \\n-- Outer Iteration 13: Had 6 CG iterations, trust bound REACHED\\n -- Obj.Reduction: Actual = 17.71440401360087, Predicted = 17.616303961789683 (A/P: 1.0056), Trust Delta = 0.16941777360208057\\n -- New Objective = 23447.19599539678, Beta Change Norm = 0.10966765508915642, Gradient Norm = 448422.2320019876\\n \\n-- Outer Iteration 14: Had 1 CG iterations\\n -- Obj.Reduction: Actual = 2.386916461367946, Predicted = 2.397254649433668 (A/P: 0.9957), Trust Delta = 0.16941777360208057\\n -- New Objective = 23444.809078935414, Beta Change Norm = 1.0691952710422448E-5, Gradient Norm = 2940.4721234861527\\n \\n-- Outer Iteration 15: Had 4 CG iterations\\n -- Obj.Reduction: Actual = 4.294265273932979, Predicted = 4.301599925371988 (A/P: 0.9983), Trust Delta = 0.16941777360208057\\n -- New Objective = 23440.51481366148, Beta Change Norm = 0.018008719957742635, Gradient Norm = 4590.1170762087395\\n \\n-- Outer Iteration 16: Had 1 CG iterations\\n -- Obj.Reduction: Actual = 2.4845889129210263E-4, Predicted = 
2.4844829761319425E-4 (A/P: 1.0), Trust Delta = 0.16941777360208057\n -- New Objective = 23440.51456520259, Beta Change Norm = 1.0825357762700158E-7, Gradient Norm = 280.5707172598387\n[... outer iterations 17-28 elided: the run converges to Objective = 23328.28074247644 with final Gradient Norm = 5.538385572102319 ...]\nTermination / Convergence condition satisfied.\n[... four further BEGIN MULTINOMIAL LOGISTIC REGRESSION SCRIPT runs over the remaining cross-validation parameter/fold combinations elided; each ends with 'Termination / Convergence condition satisfied.' at final Objectives 23544.983988755208, 23489.856654527124, 23264.30248731205, and 47171.13600407166 ...]\n\"}], \"metadata\": {\"collapsed\": false, \"trusted\": true}}, {\"source\": \"### Evaluate the cross-validated model\", \"cell_type\": \"markdown\", \"metadata\": {}}, {\"execution_count\": 1, \"cell_type\": \"code\", \"source\": \"val cvpredictions = cvmodel.transform(testDataset.withColumnRenamed(\\\"label\\\", \\\"OriginalLabel\\\"))\\ncvpredictions.registerTempTable(\\\"cvpredictions\\\")\\nsqlContext.sql(\\\"SELECT sqrt(avg(square(OriginalLabel - prediction))) FROM cvpredictions\\\").show\", \"outputs\": [{\"output_type\": \"stream\", \"name\": \"stdout\", \"text\": \"+------------------+\\n| _c0|\\n+------------------+\\n|0.8557362892866146|\\n+------------------+\\n\\n\"}], \"metadata\": {\"collapsed\": false, \"trusted\": true}}, {\"source\": \"## Homework ;)\\n\\nRead http://apache.github.io/incubator-systemml/algorithms-classification.html#multinomial-logistic-regression and perform cross validation on other hyperparameters: for example: icpt, tol, maxOuterIter, maxInnerIter\", \"cell_type\": \"markdown\", \"metadata\": {}}], \"nbformat\": 4, \"metadata\": {\"kernelspec\": {\"display_name\": \"Scala 2.10.4 (Spark 1.5.2)\", \"name\": \"spark\", \"language\": \"scala\"}, \"language_info\": 
{\"name\": \"scala\"}}}\n\\ No newline at end of file\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1606] Update notebook samples with latest code
Delayed "Flight Delay Prediction" notebook temporarily due to issues. |
49,717 | 17.05.2017 18:46:21 | 25,200 | c3aeb48bf6b54febb861b7b4381c3d7af450a8e8 | [HOTFIX] for sparse GPU transpose | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"new_path": "src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java",
"diff": "@@ -52,6 +52,7 @@ import static jcuda.jcudnn.cudnnDataType.CUDNN_DATA_DOUBLE;\nimport static jcuda.jcudnn.cudnnNanPropagation.CUDNN_PROPAGATE_NAN;\nimport static jcuda.jcudnn.cudnnPoolingMode.CUDNN_POOLING_MAX;\nimport static jcuda.jcudnn.cudnnTensorFormat.CUDNN_TENSOR_NCHW;\n+import static jcuda.jcusparse.JCusparse.cusparseDcsr2csc;\nimport static jcuda.jcusparse.JCusparse.cusparseDcsrgemm;\nimport static jcuda.jcusparse.JCusparse.cusparseDcsrmv;\nimport static jcuda.jcusparse.cusparseOperation.CUSPARSE_OPERATION_NON_TRANSPOSE;\n@@ -61,6 +62,8 @@ import static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToDevice;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyDeviceToHost;\nimport static jcuda.runtime.cudaMemcpyKind.cudaMemcpyHostToDevice;\n+import jcuda.jcusparse.cusparseAction;\n+import jcuda.jcusparse.cusparseIndexBase;\nimport org.apache.commons.logging.Log;\nimport org.apache.commons.logging.LogFactory;\nimport org.apache.sysml.api.DMLScript;\n@@ -2732,7 +2735,7 @@ public class LibMatrixCUDA {\n* Performs sparse and dense dgeam given two input matrices\n* C = alpha* op( A ) + beta* op ( B )\n* where op = transpose or not (specified by isLeftTransposed and isRightTransposed).\n- *\n+ * To indicate a transpose operation, make sure in1 == in2 and isLeftTransposed == isRightTransposed == true\n* @param ec execution context\n* @param gCtx a valid {@link GPUContext}\n* @param instName the invoking instruction's name for record {@link Statistics}.\n@@ -2756,35 +2759,6 @@ public class LibMatrixCUDA {\nint transa = isLeftTransposed ? CUBLAS_OP_T : CUBLAS_OP_N;\nint transb = isRightTransposed ? CUBLAS_OP_T : CUBLAS_OP_N;\n- int lda = (int) in1.getNumColumns();\n- int ldb = (int) in2.getNumColumns();\n- int m = (int) in1.getNumColumns();\n- int n = (int) in2.getNumRows();\n- if (isLeftTransposed && isRightTransposed) {\n- m = (int) in1.getNumRows();\n- n = (int) in2.getNumColumns();\n- }\n- else if (isLeftTransposed) {\n- m = (int) in1.getNumRows();\n- } else if (isRightTransposed) {\n- n = (int) in2.getNumColumns();\n- }\n- int ldc = m;\n-\n-\n-\n- /**\n- int m = (int) in1.getNumRows();\n- int n = (int) in1.getNumColumns();\n- if(!isLeftTransposed && isRightTransposed) {\n- m = (int) in1.getNumColumns();\n- n = (int) in1.getNumRows();\n- }\n- int lda = isLeftTransposed ? n : m;\n- int ldb = isRightTransposed ? 
n : m;\n- int ldc = m;\n- **/\n-\nMatrixObject out = ec.getMatrixObject(outputName);\nboolean isSparse1 = isInSparseFormat(gCtx, in1);\nboolean isSparse2 = isInSparseFormat(gCtx, in2);\n@@ -2792,39 +2766,83 @@ public class LibMatrixCUDA {\nlong t0=0,t1=0;\n// TODO: Implement sparse-dense matrix cublasDgeam kernel\nif(isSparse1 || isSparse2) {\n+ int m = (int)in1.getNumRows();\n+ int n = (int)in1.getNumColumns();\n// Invoke cuSparse when either are in sparse format\n// Perform sparse-sparse dgeam\nif (!isInSparseFormat(gCtx, in1)) {\n- if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\n+ if (GPUStatistics.DISPLAY_STATISTICS)\n+ t0 = System.nanoTime();\nin1.getGPUObject(gCtx).denseToSparse();\n- if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_DENSE_TO_SPARSE, System.nanoTime() - t0);\n+ if (GPUStatistics.DISPLAY_STATISTICS)\n+ GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_DENSE_TO_SPARSE,\n+ System.nanoTime() - t0);\n}\nCSRPointer A = in1.getGPUObject(gCtx).getJcudaSparseMatrixPtr();\nif (!isInSparseFormat(gCtx, in2)) {\n- if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\n+ if (GPUStatistics.DISPLAY_STATISTICS)\n+ t0 = System.nanoTime();\nin2.getGPUObject(gCtx).denseToSparse();\n- if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_DENSE_TO_SPARSE, System.nanoTime() - t0);\n+ if (GPUStatistics.DISPLAY_STATISTICS)\n+ GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_DENSE_TO_SPARSE,\n+ System.nanoTime() - t0);\n}\nCSRPointer B = in2.getGPUObject(gCtx).getJcudaSparseMatrixPtr();\nec.allocateGPUMatrixObject(outputName);\n+ out.getGPUObject(gCtx).addReadLock();\n- if (GPUStatistics.DISPLAY_STATISTICS) t1 = System.nanoTime();\n+ if (in1 == in2 && isLeftTransposed == true && isLeftTransposed == isRightTransposed) {\n+ // Special case for transpose\n+\n+ int nnz = (int)A.nnz;\n+ CSRPointer C = CSRPointer.allocateEmpty(gCtx, nnz, n);\n+ out.getGPUObject(gCtx).setSparseMatrixCudaPointer(C);\n+ cusparseDcsr2csc(getCusparseHandle(gCtx), m, n, nnz, A.val, A.rowPtr, A.colInd, C.val, C.colInd, C.rowPtr, cusparseAction.CUSPARSE_ACTION_NUMERIC, cusparseIndexBase.CUSPARSE_INDEX_BASE_ZERO);\n+ } else {\n+ // General case (cusparse does not accept the transpose operator for dgeam)\n+ // TODO: to implement the transposed + dgeam for sparse matrices, they need to be converted to csc, which is effectively a transpose\n+ if (isLeftTransposed || isRightTransposed) {\n+ throw new DMLRuntimeException(\n+ \"Transpose in cusparseDcsrgeam not supported for sparse matrices on GPU\");\n+ }\n+\n+ if (GPUStatistics.DISPLAY_STATISTICS)\n+ t1 = System.nanoTime();\nCSRPointer C = CSRPointer.allocateForDgeam(gCtx, getCusparseHandle(gCtx), A, B, m, n);\n- if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_SPARSE_ALLOCATE_LIB, System.nanoTime() - t1);\n+ if (GPUStatistics.DISPLAY_STATISTICS)\n+ GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_SPARSE_ALLOCATE_LIB,\n+ System.nanoTime() - t1);\nout.getGPUObject(gCtx).setSparseMatrixCudaPointer(C);\n//long sizeOfC = CSRPointer.estimateSize(C.nnz, out.getNumRows());\n- out.getGPUObject(gCtx).addReadLock();\n- if (GPUStatistics.DISPLAY_STATISTICS) t0 = System.nanoTime();\n+ if (GPUStatistics.DISPLAY_STATISTICS)\n+ t0 = System.nanoTime();\nJCusparse.cusparseDcsrgeam(getCusparseHandle(gCtx), m, n, alphaPtr, A.descr, (int) 
A.nnz, A.val, A.rowPtr, A.colInd, betaPtr,\n- B.descr, (int)B.nnz, B.val, B.rowPtr, B.colInd,\n- C.descr, C.val, C.rowPtr, C.colInd);\n+ B.descr, (int) B.nnz, B.val, B.rowPtr, B.colInd, C.descr, C.val, C.rowPtr, C.colInd);\n//cudaDeviceSynchronize;\n- if (GPUStatistics.DISPLAY_STATISTICS) GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_SPARSE_DGEAM_LIB, System.nanoTime() - t0);\n+ if (GPUStatistics.DISPLAY_STATISTICS)\n+ GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_SPARSE_DGEAM_LIB,\n+ System.nanoTime() - t0);\n}\n- else {\n+ } else {\n// Dense-Dense dgeam\n+\n+ int lda = (int) in1.getNumColumns();\n+ int ldb = (int) in2.getNumColumns();\n+ int m = (int) in1.getNumColumns();\n+ int n = (int) in2.getNumRows();\n+ if (isLeftTransposed && isRightTransposed) {\n+ m = (int) in1.getNumRows();\n+ n = (int) in2.getNumColumns();\n+ }\n+ else if (isLeftTransposed) {\n+ m = (int) in1.getNumRows();\n+ } else if (isRightTransposed) {\n+ n = (int) in2.getNumColumns();\n+ }\n+ int ldc = m;\n+\nPointer A = getDensePointer(gCtx, in1, instName);\nPointer B = getDensePointer(gCtx, in2, instName);\ngetDenseMatrixOutputForGPUInstruction(ec, instName, outputName); // Allocated the dense output matrix\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX] for sparse GPU transpose |
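The special case introduced in this hotfix implements sparse transpose by converting the CSR representation to CSC with cusparseDcsr2csc: the CSC buffers of A are, index for index, the CSR buffers of A transposed, which is why the call above passes C.colInd and C.rowPtr into the CSC output slots. Below is a minimal host-side SciPy sketch of that identity, assuming only numpy and scipy; it is illustrative, not the actual GPU implementation, which operates on device pointers.

    import numpy as np
    from scipy.sparse import random as sparse_random

    A = sparse_random(4, 6, density=0.3, format="csr", random_state=7)

    # CSR -> CSC conversion produces exactly the CSR arrays of A^T,
    # which is what the cusparseDcsr2csc call exploits.
    A_csc = A.tocsc()
    At_csr = A.T.tocsr()

    assert np.array_equal(A_csc.indptr, At_csr.indptr)    # col ptrs == row ptrs of A^T
    assert np.array_equal(A_csc.indices, At_csr.indices)  # row idx == col idx of A^T
    assert np.allclose(A_csc.data, At_csr.data)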
49,737 | 19.05.2017 14:01:07 | 25,200 | a6428f7d88b39792ccb5440b5bc3bedff4df21de | Change the term PLAIN_R2 to R2
Closes | [
{
"change_type": "MODIFY",
"old_path": "docs/hadoop-batch-mode.md",
"new_path": "docs/hadoop-batch-mode.md",
"diff": "@@ -760,11 +760,11 @@ Let's go ahead and run the SystemML example from the GitHub README.\nAVG_RES_Y,1.5905895170230406E-10\nSTDEV_RES_Y,2.0668015575844624E-8\nDISPERSION,4.262683023432828E-16\n- PLAIN_R2,1.0\n+ R2,1.0\nADJUSTED_R2,1.0\n- PLAIN_R2_NOBIAS,1.0\n+ R2_NOBIAS,1.0\nADJUSTED_R2_NOBIAS,1.0\n- PLAIN_R2_VS_0,1.0\n+ R2_VS_0,1.0\nADJUSTED_R2_VS_0,1.0\nWriting the output matrix...\nEND LINEAR REGRESSION SCRIPT\n@@ -795,9 +795,9 @@ Let's go ahead and run the SystemML example from the GitHub README.\nAVG_RES_Y,1,,2.5577864570734575E-10\nSTDEV_RES_Y,1,,2.390848397359923E-8\nPRED_STDEV_RES,1,TRUE,1.0\n- PLAIN_R2,1,,1.0\n+ R2,1,,1.0\nADJUSTED_R2,1,,1.0\n- PLAIN_R2_NOBIAS,1,,1.0\n+ R2_NOBIAS,1,,1.0\nADJUSTED_R2_NOBIAS,1,,1.0\n15/11/17 15:51:17 INFO api.DMLScript: SystemML Statistics:\nTotal execution time: 0.269 sec.\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/standalone-guide.md",
"new_path": "docs/standalone-guide.md",
"diff": "@@ -527,11 +527,11 @@ The LinearRegDS.dml script generates statistics to standard output similar to th\nAVG_RES_Y,-3.3127468704080085E-10\nSTDEV_RES_Y,1.7231785003947183E-8\nDISPERSION,2.963950542926297E-16\n- PLAIN_R2,1.0\n+ R2,1.0\nADJUSTED_R2,1.0\n- PLAIN_R2_NOBIAS,1.0\n+ R2_NOBIAS,1.0\nADJUSTED_R2_NOBIAS,1.0\n- PLAIN_R2_VS_0,1.0\n+ R2_VS_0,1.0\nADJUSTED_R2_VS_0,1.0\nWriting the output matrix...\nEND LINEAR REGRESSION SCRIPT\n@@ -572,9 +572,9 @@ This generates statistics similar to the following to standard output.\nAVG_RES_Y,1,,-4.1450397073455047E-10\nSTDEV_RES_Y,1,,2.0519206226041048E-8\nPRED_STDEV_RES,1,TRUE,1.0\n- PLAIN_R2,1,,1.0\n+ R2,1,,1.0\nADJUSTED_R2,1,,1.0\n- PLAIN_R2_NOBIAS,1,,1.0\n+ R2_NOBIAS,1,,1.0\nADJUSTED_R2_NOBIAS,1,,1.0\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/algorithms/GLM-predict.dml",
"new_path": "scripts/algorithms/GLM-predict.dml",
"diff": "# AVG_RES_Y + Average of column residual, i.e. of Y - mean(Y|X)\n# STDEV_RES_Y + St.Dev. of column residual, i.e. of Y - mean(Y|X)\n# PRED_STDEV_RES + + Model-predicted St.Dev. of column residual\n-# PLAIN_R2 + Plain R^2 of Y column residual with bias included\n+# R2 + R^2 of Y column residual with bias included\n# ADJUSTED_R2 + Adjusted R^2 of Y column residual with bias included\n-# PLAIN_R2_NOBIAS + Plain R^2 of Y column residual with bias subtracted\n+# R2_NOBIAS + R^2 of Y column residual with bias subtracted\n# ADJUSTED_R2_NOBIAS + Adjusted R^2 of Y column residual with bias subtracted\n# ---------------------------------------------------------------------------------------------\n#\n@@ -284,9 +284,9 @@ if (fileY != \" \")\n} else {\nvar_res_Y = matrix (0.0, rows = 1, cols = ncol (Y)) / 0.0;\n}\n- plain_R2_nobias = 1 - ss_avg_res_Y / ss_avg_tot_Y;\n+ R2_nobias = 1 - ss_avg_res_Y / ss_avg_tot_Y;\nadjust_R2_nobias = 1 - var_res_Y / var_tot_Y;\n- plain_R2 = 1 - ss_res_Y / ss_avg_tot_Y;\n+ R2 = 1 - ss_res_Y / ss_avg_tot_Y;\nif (df_ss_res_Y > 0) {\nadjust_R2 = 1 - (ss_res_Y / df_ss_res_Y) / var_tot_Y;\n} else {\n@@ -320,9 +320,9 @@ if (fileY != \" \")\nstr = append (str, \"AVG_RES_Y,\" + i + \",,\" + as.scalar (avg_res_Y [1, i]));\nstr = append (str, \"STDEV_RES_Y,\" + i + \",,\" + as.scalar (sqrt (var_res_Y [1, i])));\nstr = append (str, \"PRED_STDEV_RES,\" + i + \",TRUE,\" + as.scalar (sqrt (predicted_avg_var_res_Y [1, i])));\n- str = append (str, \"PLAIN_R2,\" + i + \",,\" + as.scalar (plain_R2 [1, i]));\n+ str = append (str, \"R2,\" + i + \",,\" + as.scalar (R2 [1, i]));\nstr = append (str, \"ADJUSTED_R2,\" + i + \",,\" + as.scalar (adjust_R2 [1, i]));\n- str = append (str, \"PLAIN_R2_NOBIAS,\" + i + \",,\" + as.scalar (plain_R2_nobias [1, i]));\n+ str = append (str, \"R2_NOBIAS,\" + i + \",,\" + as.scalar (R2_nobias [1, i]));\nstr = append (str, \"ADJUSTED_R2_NOBIAS,\" + i + \",,\" + as.scalar (adjust_R2_nobias [1, i]));\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/algorithms/LinearRegCG.dml",
"new_path": "scripts/algorithms/LinearRegCG.dml",
"diff": "# AVG_RES_Y Average of the residual Y - pred(Y|X), i.e. residual bias\n# STDEV_RES_Y Standard Deviation of the residual Y - pred(Y|X)\n# DISPERSION GLM-style dispersion, i.e. residual sum of squares / # deg. fr.\n-# PLAIN_R2 Plain R^2 of residual with bias included vs. total average\n+# R2 R^2 of residual with bias included vs. total average\n# ADJUSTED_R2 Adjusted R^2 of residual with bias included vs. total average\n-# PLAIN_R2_NOBIAS Plain R^2 of residual with bias subtracted vs. total average\n+# R2_NOBIAS R^2 of residual with bias subtracted vs. total average\n# ADJUSTED_R2_NOBIAS Adjusted R^2 of residual with bias subtracted vs. total average\n-# PLAIN_R2_VS_0 * Plain R^2 of residual with bias included vs. zero constant\n+# R2_VS_0 * R^2 of residual with bias included vs. zero constant\n# ADJUSTED_R2_VS_0 * Adjusted R^2 of residual with bias included vs. zero constant\n# -------------------------------------------------------------------------------------\n# * The last two statistics are only printed if there is no intercept (icpt=0)\n@@ -223,7 +223,7 @@ avg_res = sum (y_residual) / n;\nss_res = sum (y_residual ^ 2);\nss_avg_res = ss_res - n * avg_res ^ 2;\n-plain_R2 = 1 - ss_res / ss_avg_tot;\n+R2 = 1 - ss_res / ss_avg_tot;\nif (n > m_ext) {\ndispersion = ss_res / (n - m_ext);\nadjusted_R2 = 1 - dispersion / (ss_avg_tot / (n - 1));\n@@ -232,7 +232,7 @@ if (n > m_ext) {\nadjusted_R2 = 0.0 / 0.0;\n}\n-plain_R2_nobias = 1 - ss_avg_res / ss_avg_tot;\n+R2_nobias = 1 - ss_avg_res / ss_avg_tot;\ndeg_freedom = n - m - 1;\nif (deg_freedom > 0) {\nvar_res = ss_avg_res / deg_freedom;\n@@ -243,7 +243,7 @@ if (deg_freedom > 0) {\nprint (\"Warning: zero or negative number of degrees of freedom.\");\n}\n-plain_R2_vs_0 = 1 - ss_res / ss_tot;\n+R2_vs_0 = 1 - ss_res / ss_tot;\nif (n > m) {\nadjusted_R2_vs_0 = 1 - (ss_res / (n - m)) / (ss_tot / n);\n} else {\n@@ -255,12 +255,12 @@ str = append (str, \"STDEV_TOT_Y,\" + sqrt (var_tot)); # Standard Dev\nstr = append (str, \"AVG_RES_Y,\" + avg_res); # Average of the residual Y - pred(Y|X), i.e. residual bias\nstr = append (str, \"STDEV_RES_Y,\" + sqrt (var_res)); # Standard Deviation of the residual Y - pred(Y|X)\nstr = append (str, \"DISPERSION,\" + dispersion); # GLM-style dispersion, i.e. residual sum of squares / # d.f.\n-str = append (str, \"PLAIN_R2,\" + plain_R2); # Plain R^2 of residual with bias included vs. total average\n+str = append (str, \"R2,\" + R2); # R^2 of residual with bias included vs. total average\nstr = append (str, \"ADJUSTED_R2,\" + adjusted_R2); # Adjusted R^2 of residual with bias included vs. total average\n-str = append (str, \"PLAIN_R2_NOBIAS,\" + plain_R2_nobias); # Plain R^2 of residual with bias subtracted vs. total average\n+str = append (str, \"R2_NOBIAS,\" + R2_nobias); # R^2 of residual with bias subtracted vs. total average\nstr = append (str, \"ADJUSTED_R2_NOBIAS,\" + adjusted_R2_nobias); # Adjusted R^2 of residual with bias subtracted vs. total average\nif (intercept_status == 0) {\n- str = append (str, \"PLAIN_R2_VS_0,\" + plain_R2_vs_0); # Plain R^2 of residual with bias included vs. zero constant\n+ str = append (str, \"R2_VS_0,\" + R2_vs_0); # R^2 of residual with bias included vs. zero constant\nstr = append (str, \"ADJUSTED_R2_VS_0,\" + adjusted_R2_vs_0); # Adjusted R^2 of residual with bias included vs. zero constant\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/algorithms/LinearRegDS.dml",
"new_path": "scripts/algorithms/LinearRegDS.dml",
"diff": "# AVG_RES_Y Average of the residual Y - pred(Y|X), i.e. residual bias\n# STDEV_RES_Y Standard Deviation of the residual Y - pred(Y|X)\n# DISPERSION GLM-style dispersion, i.e. residual sum of squares / # deg. fr.\n-# PLAIN_R2 Plain R^2 of residual with bias included vs. total average\n+# R2 R^2 of residual with bias included vs. total average\n# ADJUSTED_R2 Adjusted R^2 of residual with bias included vs. total average\n-# PLAIN_R2_NOBIAS Plain R^2 of residual with bias subtracted vs. total average\n+# R2_NOBIAS R^2 of residual with bias subtracted vs. total average\n# ADJUSTED_R2_NOBIAS Adjusted R^2 of residual with bias subtracted vs. total average\n-# PLAIN_R2_VS_0 * Plain R^2 of residual with bias included vs. zero constant\n+# R2_VS_0 * R^2 of residual with bias included vs. zero constant\n# ADJUSTED_R2_VS_0 * Adjusted R^2 of residual with bias included vs. zero constant\n# -------------------------------------------------------------------------------------\n# * The last two statistics are only printed if there is no intercept (icpt=0)\n@@ -165,7 +165,7 @@ avg_res = sum (y_residual) / n;\nss_res = sum (y_residual ^ 2);\nss_avg_res = ss_res - n * avg_res ^ 2;\n-plain_R2 = 1 - ss_res / ss_avg_tot;\n+R2 = 1 - ss_res / ss_avg_tot;\nif (n > m_ext) {\ndispersion = ss_res / (n - m_ext);\nadjusted_R2 = 1 - dispersion / (ss_avg_tot / (n - 1));\n@@ -174,7 +174,7 @@ if (n > m_ext) {\nadjusted_R2 = 0.0 / 0.0;\n}\n-plain_R2_nobias = 1 - ss_avg_res / ss_avg_tot;\n+R2_nobias = 1 - ss_avg_res / ss_avg_tot;\ndeg_freedom = n - m - 1;\nif (deg_freedom > 0) {\nvar_res = ss_avg_res / deg_freedom;\n@@ -185,7 +185,7 @@ if (deg_freedom > 0) {\nprint (\"Warning: zero or negative number of degrees of freedom.\");\n}\n-plain_R2_vs_0 = 1 - ss_res / ss_tot;\n+R2_vs_0 = 1 - ss_res / ss_tot;\nif (n > m) {\nadjusted_R2_vs_0 = 1 - (ss_res / (n - m)) / (ss_tot / n);\n} else {\n@@ -197,12 +197,12 @@ str = append (str, \"STDEV_TOT_Y,\" + sqrt (var_tot)); # Standard Dev\nstr = append (str, \"AVG_RES_Y,\" + avg_res); # Average of the residual Y - pred(Y|X), i.e. residual bias\nstr = append (str, \"STDEV_RES_Y,\" + sqrt (var_res)); # Standard Deviation of the residual Y - pred(Y|X)\nstr = append (str, \"DISPERSION,\" + dispersion); # GLM-style dispersion, i.e. residual sum of squares / # d.f.\n-str = append (str, \"PLAIN_R2,\" + plain_R2); # Plain R^2 of residual with bias included vs. total average\n+str = append (str, \"R2,\" + R2); # R^2 of residual with bias included vs. total average\nstr = append (str, \"ADJUSTED_R2,\" + adjusted_R2); # Adjusted R^2 of residual with bias included vs. total average\n-str = append (str, \"PLAIN_R2_NOBIAS,\" + plain_R2_nobias); # Plain R^2 of residual with bias subtracted vs. total average\n+str = append (str, \"R2_NOBIAS,\" + R2_nobias); # R^2 of residual with bias subtracted vs. total average\nstr = append (str, \"ADJUSTED_R2_NOBIAS,\" + adjusted_R2_nobias); # Adjusted R^2 of residual with bias subtracted vs. total average\nif (intercept_status == 0) {\n- str = append (str, \"PLAIN_R2_VS_0,\" + plain_R2_vs_0); # Plain R^2 of residual with bias included vs. zero constant\n+ str = append (str, \"R2_VS_0,\" + R2_vs_0); # R^2 of residual with bias included vs. zero constant\nstr = append (str, \"ADJUSTED_R2_VS_0,\" + adjusted_R2_vs_0); # Adjusted R^2 of residual with bias included vs. zero constant\n}\n"
},
{
"change_type": "MODIFY",
"old_path": "scripts/algorithms/StepLinearRegDS.dml",
"new_path": "scripts/algorithms/StepLinearRegDS.dml",
"diff": "# AVG_RES_Y Average of the residual Y - pred(Y|X), i.e. residual bias\n# STDEV_RES_Y Standard Deviation of the residual Y - pred(Y|X)\n# DISPERSION GLM-style dispersion, i.e. residual sum of squares / # deg. fr.\n-# PLAIN_R2 Plain R^2 of residual with bias included vs. total average\n+# R2 R^2 of residual with bias included vs. total average\n# ADJUSTED_R2 Adjusted R^2 of residual with bias included vs. total average\n-# PLAIN_R2_NOBIAS Plain R^2 of residual with bias subtracted vs. total average\n+# R2_NOBIAS R^2 of residual with bias subtracted vs. total average\n# ADJUSTED_R2_NOBIAS Adjusted R^2 of residual with bias subtracted vs. total average\n-# PLAIN_R2_VS_0 * Plain R^2 of residual with bias included vs. zero constant\n+# R2_VS_0 * R^2 of residual with bias included vs. zero constant\n# ADJUSTED_R2_VS_0 * Adjusted R^2 of residual with bias included vs. zero constant\n# -------------------------------------------------------------------------------------\n# * The last two statistics are only printed if there is no intercept (icpt=0)\n@@ -271,7 +271,7 @@ linear_regression = function (Matrix[Double] X, Matrix[Double] y, Double m_orig,\n# ss_res = sum (y_residual ^ 2);\nss_avg_res = ss_res - n * avg_res ^ 2;\n- plain_R2 = 1 - ss_res / ss_avg_tot;\n+ R2 = 1 - ss_res / ss_avg_tot;\nif (n > m_ext) {\ndispersion = ss_res / (n - m_ext);\nadjusted_R2 = 1 - dispersion / (ss_avg_tot / (n - 1));\n@@ -280,7 +280,7 @@ linear_regression = function (Matrix[Double] X, Matrix[Double] y, Double m_orig,\nadjusted_R2 = 0.0 / 0.0;\n}\n- plain_R2_nobias = 1 - ss_avg_res / ss_avg_tot;\n+ R2_nobias = 1 - ss_avg_res / ss_avg_tot;\ndeg_freedom = n - m - 1;\nif (deg_freedom > 0) {\nvar_res = ss_avg_res / deg_freedom;\n@@ -291,7 +291,7 @@ linear_regression = function (Matrix[Double] X, Matrix[Double] y, Double m_orig,\nprint (\"Warning: zero or negative number of degrees of freedom.\");\n}\n- plain_R2_vs_0 = 1 - ss_res / ss_tot;\n+ R2_vs_0 = 1 - ss_res / ss_tot;\nif (n > m) {\nadjusted_R2_vs_0 = 1 - (ss_res / (n - m)) / (ss_tot / n);\n} else {\n@@ -303,12 +303,12 @@ linear_regression = function (Matrix[Double] X, Matrix[Double] y, Double m_orig,\nstr = append (str, \"AVG_RES_Y,\" + avg_res); # Average of the residual Y - pred(Y|X), i.e. residual bias\nstr = append (str, \"STDEV_RES_Y,\" + sqrt (var_res)); # Standard Deviation of the residual Y - pred(Y|X)\nstr = append (str, \"DISPERSION,\" + dispersion); # GLM-style dispersion, i.e. residual sum of squares / # d.f.\n- str = append (str, \"PLAIN_R2,\" + plain_R2); # Plain R^2 of residual with bias included vs. total average\n+ str = append (str, \"R2,\" + R2); # R^2 of residual with bias included vs. total average\nstr = append (str, \"ADJUSTED_R2,\" + adjusted_R2); # Adjusted R^2 of residual with bias included vs. total average\n- str = append (str, \"PLAIN_R2_NOBIAS,\" + plain_R2_nobias); # Plain R^2 of residual with bias subtracted vs. total average\n+ str = append (str, \"R2_NOBIAS,\" + R2_nobias); # R^2 of residual with bias subtracted vs. total average\nstr = append (str, \"ADJUSTED_R2_NOBIAS,\" + adjusted_R2_nobias); # Adjusted R^2 of residual with bias subtracted vs. total average\nif (intercept_status == 0) {\n- str = append (str, \"PLAIN_R2_VS_0,\" + plain_R2_vs_0); # Plain R^2 of residual with bias included vs. zero constant\n+ str = append (str, \"R2_VS_0,\" + R2_vs_0); # R^2 of residual with bias included vs. zero constant\nstr = append (str, \"ADJUSTED_R2_VS_0,\" + adjusted_R2_vs_0); # Adjusted R^2 of residual with bias included vs. 
zero constant\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1455] Change the term PLAIN_R2 to R2
Closes #500. |
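For reference, the renamed statistics keep their definitions from the scripts: R2 = 1 - ss_res / ss_avg_tot, ADJUSTED_R2 = 1 - (ss_res / (n - m_ext)) / (ss_avg_tot / (n - 1)), and R2_NOBIAS = 1 - ss_avg_res / ss_avg_tot. A minimal NumPy sketch of those formulas follows, with variable names mirroring LinearRegDS.dml and purely illustrative data.

    import numpy as np

    # Illustrative response and fitted values (not from any real run).
    y      = np.array([3.0, 5.0, 7.0, 10.0, 12.0])
    y_pred = np.array([3.2, 4.8, 7.5,  9.6, 12.1])
    n, m_ext = y.size, 2                  # m_ext = #features + intercept

    avg_tot    = np.sum(y) / n
    ss_avg_tot = np.sum(y ** 2) - n * avg_tot ** 2   # total SS around the mean

    y_residual = y - y_pred
    avg_res    = np.sum(y_residual) / n
    ss_res     = np.sum(y_residual ** 2)
    ss_avg_res = ss_res - n * avg_res ** 2           # residual SS, bias subtracted

    R2          = 1 - ss_res / ss_avg_tot            # "R2"
    dispersion  = ss_res / (n - m_ext)
    adjusted_R2 = 1 - dispersion / (ss_avg_tot / (n - 1))
    R2_nobias   = 1 - ss_avg_res / ss_avg_tot        # "R2_NOBIAS"
    print(R2, adjusted_R2, R2_nobias)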
49,768 | 21.05.2017 11:05:12 | 25,200 | 259742c400287d2dbc85ca30631f06af7e33cf43 | Update notebook samples with latest code
Removed SystemML setup instructions from most of the notebooks. | [
{
"change_type": "MODIFY",
"old_path": "samples/jupyter-notebooks/Autoencoder.ipynb",
"new_path": "samples/jupyter-notebooks/Autoencoder.ipynb",
"diff": "\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"# Install SystemML Python package and jar file\\n\",\n- \"You can install SystemML Python tgz file either from \\n\",\n- \" 1. Distribution location (https://dist.apache.org/repos/dist/release/incubator/systemml/) or \\n\",\n- \" 2. Latest daily built code (https://sparktc.ibmcloud.com/repo/latest/) or\\n\",\n- \" 3. From your local system if you have extracted recent code and built locally. \\n\",\n- \" (e.g. ~/git/incubator-systemml/target/systemml-1.0.0-incubating-SNAPSHOT-python.tgz)\\n\",\n- \" \\n\",\n\"### This notebook is supported with SystemML 0.14.0 and above.\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"# !pip install --user systemml>=0.14.0\\n\",\n- \"\\n\",\n- \"!pip install https://dist.apache.org/repos/dist/release/incubator/systemml/0.14.0-incubating/systemml-0.14.0-incubating-python.tgz\\n\",\n- \" \\n\",\n- \"# !pip install https://sparktc.ibmcloud.com/repo/latest/systemml-1.0.0-incubating-SNAPSHOT-python.tgz\\n\",\n- \" \\n\",\n- \"# !pip install ~/git/incubator-systemml/target/systemml-1.0.0-incubating-SNAPSHOT-python.tgz\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"!pip show systemml\"\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"import pandas as pd\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"X_pd = pd.DataFrame(range(1, 2001,1),dtype=float).values.reshape(100,20)\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"!ls -l /tmp/data/Input\"\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"autoencoderURL = \\\"https://raw.githubusercontent.com/apache/incubator-systemml/master/scripts/staging/autoencoder-2layer.dml\\\"\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"!ls -l /tmp/data/Output\"\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true,\n\"scrolled\": true\n},\n\"outputs\": [],\n\"\\n\",\n\"print (iter, num_iters_per_epoch, beg, end, o)\"\n]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"source\": [\n- \"## Uninstall/Clean up SystemML Python package and jar file\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"!yes | pip uninstall systemml\"\n- ]\n}\n],\n\"metadata\": {\n"
},
{
"change_type": "MODIFY",
"old_path": "samples/jupyter-notebooks/Linear_Regression_Algorithms_Demo.ipynb",
"new_path": "samples/jupyter-notebooks/Linear_Regression_Algorithms_Demo.ipynb",
"diff": "\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n+ \"# Linear Regression Algorithms using Apache SystemML\\n\",\n+ \"\\n\",\n\"This notebook shows:\\n\",\n\"- Install SystemML Python package and jar file\\n\",\n\" - pip\\n\",\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"# Install SystemML Python package and jar file\\n\",\n- \"You can install SystemML Python tgz file either from \\n\",\n- \" 1. Distribution location (https://dist.apache.org/repos/dist/release/incubator/systemml/) or \\n\",\n- \" 2. Latest daily built code (https://sparktc.ibmcloud.com/repo/latest/) or\\n\",\n- \" 3. From your local system if you have extracted recent code and built locally. \\n\",\n- \" (e.g. ~/git/incubator-systemml/target/systemml-1.0.0-incubating-SNAPSHOT-python.tgz)\\n\",\n- \" \\n\",\n\"### This notebook is supported with SystemML 0.14.0 and above.\"\n]\n},\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true,\n- \"scrolled\": false\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"# !pip install --user systemml>=0.14.0\\n\",\n- \"\\n\",\n- \"!pip install https://dist.apache.org/repos/dist/release/incubator/systemml/0.14.0-incubating/systemml-0.14.0-incubating-python.tgz\\n\",\n- \" \\n\",\n- \"# !pip install https://sparktc.ibmcloud.com/repo/latest/systemml-1.0.0-incubating-SNAPSHOT-python.tgz\\n\",\n- \" \\n\",\n- \"# !pip install ~/git/incubator-systemml/target/systemml-1.0.0-incubating-SNAPSHOT-python.tgz\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true,\n\"scrolled\": true\n},\n\"outputs\": [],\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"from systemml import MLContext, dml, dmlFromResource\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"ml.execute(dml(\\\"\\\"\\\"s = 'Hello World!'\\\"\\\"\\\").output(\\\"s\\\")).get(\\\"s\\\")\"\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"prog = dml(script).input('$nr', 1e5).output('s')\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"prog = dml(script).input('$nr', 1e6).output('s')\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"diabetes = datasets.load_diabetes()\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"diabetes.data.shape\"\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true,\n\"scrolled\": true\n},\n\"outputs\": [],\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true,\n\"scrolled\": false\n},\n\"outputs\": [],\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true,\n\"scrolled\": false\n},\n\"outputs\": [],\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"import 
os\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"plt.scatter(diabetes_X_train, diabetes_y_train, color='black')\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"regr = LinearRegression(sqlCtx)\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"# Use the trained model to perform prediction\\n\",\n\"\\n\",\n\"plt.plot(diabetes_X_test, predictions, color='black')\"\n]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"# Uninstall/Clean up SystemML Python package and jar file\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"!yes | pip uninstall systemml\"\n- ]\n}\n],\n\"metadata\": {\n"
},
{
"change_type": "MODIFY",
"old_path": "samples/jupyter-notebooks/SystemML-PySpark-Recommendation-Demo.ipynb",
"new_path": "samples/jupyter-notebooks/SystemML-PySpark-Recommendation-Demo.ipynb",
"diff": "\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"# Install SystemML Python package and jar file\\n\",\n- \"You can install SystemML Python tgz file either from \\n\",\n- \" 1. Distribution location (https://dist.apache.org/repos/dist/release/incubator/systemml/) or \\n\",\n- \" 2. Latest daily built code (https://sparktc.ibmcloud.com/repo/latest/) or\\n\",\n- \" 3. From your local system if you have extracted recent code and built locally. \\n\",\n- \" (e.g. ~/git/incubator-systemml/target/systemml-1.0.0-incubating-SNAPSHOT-python.tgz)\\n\",\n- \" \\n\",\n\"### This notebook is supported with SystemML 0.14.0 and above.\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"# !pip install --user systemml>=0.14.0\\n\",\n- \"\\n\",\n- \"!pip install https://dist.apache.org/repos/dist/release/incubator/systemml/0.14.0-incubating/systemml-0.14.0-incubating-python.tgz\\n\",\n- \" \\n\",\n- \"# !pip install https://sparktc.ibmcloud.com/repo/latest/systemml-1.0.0-incubating-SNAPSHOT-python.tgz\\n\",\n- \" \\n\",\n- \"# !pip install ~/git/incubator-systemml/target/systemml-1.0.0-incubating-SNAPSHOT-python.tgz\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"!pip show systemml\"\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"%%sh\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"# Load data\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"# Plot training loss over time\\n\",\n\"plt.ylabel('Loss')\\n\",\n\"plt.title('PNMF Training Loss')\"\n]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"## Uninstall/Clean up SystemML Python package and jar file\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"!yes | pip uninstall systemml\"\n- ]\n}\n],\n\"metadata\": {\n"
},
{
"change_type": "MODIFY",
"old_path": "samples/jupyter-notebooks/tutorial1.ipynb",
"new_path": "samples/jupyter-notebooks/tutorial1.ipynb",
"diff": "\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"# Install SystemML Python package and jar file\\n\",\n- \"You can configre SystemML jar file in Scala kernel by downloading either from \\n\",\n- \" 1. Distribution location (https://dist.apache.org/repos/dist/release/incubator/systemml/) or \\n\",\n- \" 2. Latest daily built code (https://sparktc.ibmcloud.com/repo/latest/) or\\n\",\n- \" 3. From your local system if you have extracted recent code and built locally. \\n\",\n- \" (e.g. ~/git/incubator-systemml/target/systemml-1.0.0-incubating-SNAPSHOT.jar)\\n\",\n+ \"# Install SystemML jar file and configure kernel\\n\",\n\" \\n\",\n- \"Please visit SystemML site to know \\\"How to configure Toree(Scala) Kernel\\\".\\n\",\n+ \"Please visit http://systemml.apache.org/install-systemml.html site to know \\\"How to configure Toree(Scala) Kernel\\\".\\n\",\n\" \\n\",\n\"### This notebook is supported with SystemML 0.14.0 and above.\"\n]\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"import org.apache.sysml.api.mlcontext.MLContext\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"val sumScript = \\\"\\\"\\\"\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"outMatrix.show\"\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1606] Update notebook samples with latest code
Removed SystemML setup instructions from most of the notebooks. |
49,768 | 21.05.2017 17:38:56 | 25,200 | fc612f2e08f8bab9daa74a451c0b618c2d92543d | Update notebook samples with latest code
Updated Deep Learning notebook compatible with 0.14 release code | [
{
"change_type": "MODIFY",
"old_path": "samples/jupyter-notebooks/Deep_Learning_Image_Classification.ipynb",
"new_path": "samples/jupyter-notebooks/Deep_Learning_Image_Classification.ipynb",
"diff": "\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"# Deep Learning Image Classification\\n\",\n+ \"# Deep Learning Image Classification using Apache SystemML\\n\",\n\"\\n\",\n\"This notebook shows SystemML Deep Learning functionality to map images of single digit numbers to their corresponding numeric representations. See [Getting Started with Deep Learning and Python](http://www.pyimagesearch.com/2014/09/22/getting-started-deep-learning-python/) for an explanation of the used deep learning concepts and assumptions.\\n\",\n\"\\n\",\n\"The downloaded MNIST dataset contains labeled images of handwritten digits, where each example is a 28x28 pixel image of grayscale values in the range [0,255] stretched out as 784 pixels, and each label is one of 10 possible digits in [0,9]. We download 60,000 training examples, and 10,000 test examples, where the format is \\\"label, pixel_1, pixel_2, ..., pixel_n\\\". We train a SystemML LeNet model. The results of the learning algorithms have an accuracy of 98 percent.\\n\",\n\"\\n\",\n- \"1. [Install and load SystemML and other libraries](#load_systemml)\\n\",\n\"1. [Download and Access MNIST data](#access_data)\\n\",\n\"1. [Train a CNN classifier for MNIST handwritten digits](#train)\\n\",\n\"1. [Detect handwritten Digits](#predict)\\n\"\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"<a id=\\\"load_systemml\\\"></a>\\n\",\n- \"## Install and load SystemML and other libraries\\n\",\n- \"\\n\",\n- \"You can install SystemML Python tgz file either from \\n\",\n- \" 1. Distribution location (https://dist.apache.org/repos/dist/release/incubator/systemml/) or \\n\",\n- \" 2. Latest daily built code (https://sparktc.ibmcloud.com/repo/latest/) or\\n\",\n- \" 3. From your local system if you have extracted recent code and built locally. \\n\",\n- \" (e.g. ~/git/incubator-systemml/target/systemml-1.0.0-incubating-SNAPSHOT-python.tgz)\\n\",\n- \" \\n\",\n- \"### This notebook is supported with master branch (SystemML 1.0.0) as of 05/15/2017 and later code.\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true,\n- \"scrolled\": false\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"# !pip install --user systemml>=1.0.0\\n\",\n- \"\\n\",\n- \"!pip install https://sparktc.ibmcloud.com/repo/latest/systemml-1.0.0-incubating-SNAPSHOT-python.tgz\\n\",\n- \" \\n\",\n- \"# !pip install ~/git/incubator-systemml/target/systemml-1.0.0-incubating-SNAPSHOT-python.tgz\"\n+ \"### This notebook is supported with SystemML 0.14.0 and above.\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true,\n\"scrolled\": false\n},\n\"outputs\": [],\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"from systemml import MLContext, dml\\n\",\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true,\n\"scrolled\": true\n},\n\"outputs\": [],\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true,\n\"scrolled\": true\n},\n\"outputs\": [],\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n\"source\": [\n- \"Alternatively get the data from here.\"\n+ \"Alternatively get the data from here. 
(Uncomment curl commands from following cell if you want to download using following approach)\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true,\n\"scrolled\": true\n},\n\"outputs\": [],\n\"source\": [\n\"%%sh\\n\",\n- \"curl -O https://pjreddie.com/media/files/mnist_train.csv\\n\",\n- \"curl -O https://pjreddie.com/media/files/mnist_test.csv\\n\",\n+ \"cd data/mnist\\n\",\n+ \"# curl -O https://pjreddie.com/media/files/mnist_train.csv\\n\",\n+ \"# curl -O https://pjreddie.com/media/files/mnist_test.csv\\n\",\n\"wc -l mnist*\"\n]\n},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"trainData = np.genfromtxt('data/mnist/mnist_train.csv', delimiter=\\\",\\\")\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"pd.set_option('display.max_columns', 200)\\n\",\n\"pd.DataFrame(testData[1:10,],dtype='uint')\"\n]\n},\n+ {\n+ \"cell_type\": \"markdown\",\n+ \"metadata\": {},\n+ \"source\": [\n+ \"### Following command is not required for code above SystemML 0.14 (master branch dated 05/15/2017 or later)\"\n+ ]\n+ },\n+ {\n+ \"cell_type\": \"code\",\n+ \"execution_count\": null,\n+ \"metadata\": {},\n+ \"outputs\": [],\n+ \"source\": [\n+ \"!svn --force export https://github.com/apache/incubator-systemml/trunk/scripts/nn\"\n+ ]\n+ },\n{\n\"cell_type\": \"markdown\",\n\"metadata\": {},\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"scriptPredict = \\\"\\\"\\\"\\n\",\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n- \"collapsed\": true,\n\"scrolled\": true\n},\n\"outputs\": [],\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n+ \"metadata\": {},\n\"outputs\": [],\n\"source\": [\n\"pd.set_option('display.max_columns', 28)\\n\",\n\"pd.DataFrame((testData[i,1:]).reshape(img_size, img_size),dtype='uint')\"\n]\n- },\n- {\n- \"cell_type\": \"markdown\",\n- \"metadata\": {},\n- \"source\": [\n- \"## Uninstall/Clean up SystemML Python package and jar file\"\n- ]\n- },\n- {\n- \"cell_type\": \"code\",\n- \"execution_count\": null,\n- \"metadata\": {\n- \"collapsed\": true\n- },\n- \"outputs\": [],\n- \"source\": [\n- \"!yes | pip uninstall systemml\"\n- ]\n}\n],\n\"metadata\": {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1606] Update notebook samples with latest code
Updated Deep Learning notebook compatible with 0.14 release code |
49,772 | 22.05.2017 16:39:09 | 25,200 | e93fdd35b82e95fd41023850939593d5d37c8512 | Add stain normalization to breast cancer preprocessing.
Adding stain normalization of H&E histology slides for the breast cancer
project.
Closes | [
{
"change_type": "MODIFY",
"old_path": "projects/breast_cancer/breastcancer/preprocessing.py",
"new_path": "projects/breast_cancer/breastcancer/preprocessing.py",
"diff": "@@ -199,7 +199,7 @@ def process_tile_index(tile_index, folder, training):\n# Create tile generator.\ngenerator = create_tile_generator(slide, tile_size, overlap)\n# Generate tile.\n- tile = np.array(generator.get_tile(zoom_level, (col, row)))\n+ tile = np.asarray(generator.get_tile(zoom_level, (col, row)))\nreturn (slide_num, tile)\n@@ -291,7 +291,7 @@ def keep_tile(tile_tuple, tile_size, tissue_threshold):\nreturn False\n-# Generate Flattened Samples From Tile\n+# Generate Samples From Tile\ndef process_tile(tile_tuple, sample_size, grayscale):\n\"\"\"\n@@ -313,11 +313,8 @@ def process_tile(tile_tuple, sample_size, grayscale):\nReturns:\nA list of (slide_num, sample) tuples representing cut up tiles,\n- where each sample has been transposed from\n- (sample_size_x, sample_size_y, channels) to\n- (channels, sample_size_x, sample_size_y),\n- and flattened to a vector of length\n- (channels*sample_size_x*sample_size_y).\n+ where each sample is a 3D NumPy array of shape\n+ (sample_size_x, sample_size_y, channels).\n\"\"\"\nslide_num, tile = tile_tuple\nif grayscale:\n@@ -332,18 +329,160 @@ def process_tile(tile_tuple, sample_size, grayscale):\n# (num_x, num_y, sample_size_x, sample_size_y, ch).\n# 3. Combine num_x and num_y into single axis, returning\n# (num_samples, sample_size_x, sample_size_y, ch).\n- # 4. Swap axes from (num_samples, sample_size_x, sample_size_y, ch) to\n- # (num_samples, ch, sample_size_x, sample_size_y).\n- # 5. Flatten samples into (num_samples, ch*sample_size_x*sample_size_y).\nsamples = (tile.reshape((x // sample_size, sample_size, y // sample_size, sample_size, ch))\n.swapaxes(1,2)\n- .reshape((-1, sample_size, sample_size, ch))\n- .transpose(0,3,1,2))\n- samples = samples.reshape(samples.shape[0], -1)\n+ .reshape((-1, sample_size, sample_size, ch)))\nsamples = [(slide_num, sample) for sample in list(samples)]\nreturn samples\n+# Normalize staining\n+\n+def normalize_staining(sample_tuple, beta=0.15, alpha=1, light_intensity=255):\n+ \"\"\"\n+ Normalize the staining of H&E histology slides.\n+\n+ This function normalizes the staining of H&E histology slides.\n+\n+ References:\n+ - Macenko, Marc, et al. \"A method for normalizing histology slides\n+ for quantitative analysis.\" Biomedical Imaging: From Nano to Macro,\n+ 2009. ISBI'09. IEEE International Symposium on. IEEE, 2009.\n+ - http://wwwx.cs.unc.edu/~mn/sites/default/files/macenko2009.pdf\n+ - https://github.com/mitkovetta/staining-normalization\n+\n+ Args:\n+ sample_tuple: A (slide_num, sample) tuple, where slide_num is an\n+ integer, and sample is a 3D NumPy array of shape (H,W,C).\n+\n+ Returns:\n+ A (slide_num, sample) tuple, where the sample is a 3D NumPy array\n+ of shape (H,W,C) that has been stain normalized.\n+ \"\"\"\n+ # Setup.\n+ slide_num, sample = sample_tuple\n+ x = np.asarray(sample)\n+ h, w, c = x.shape\n+ x = x.reshape(-1, c).astype(np.float64) # shape (H*W, C)\n+\n+ # Reference stain vectors and stain saturations. We will normalize all slides\n+ # to these references. 
To create these, grab the stain vectors and stain\n+ # saturations from a desirable slide.\n+\n+ # Values in reference implementation for use with eigendecomposition approach, natural log,\n+ # and `light_intensity=240`.\n+ #stain_ref = np.array([0.5626, 0.2159, 0.7201, 0.8012, 0.4062, 0.5581]).reshape(3,2)\n+ #max_sat_ref = np.array([1.9705, 1.0308]).reshape(2,1)\n+\n+ # SVD w/ log10, and `light_intensity=255`.\n+ stain_ref = (np.array([0.54598845, 0.322116, 0.72385198, 0.76419107, 0.42182333, 0.55879629])\n+ .reshape(3,2))\n+ max_sat_ref = np.array([0.82791151, 0.61137274]).reshape(2,1)\n+\n+ # Convert RGB to OD.\n+ # Note: The original paper used log10, and the reference implementation used the natural log.\n+ #OD = -np.log((x+1)/light_intensity) # shape (H*W, C)\n+ OD = -np.log10(x/light_intensity + 1e-8)\n+\n+ # Remove data with OD intensity less than beta.\n+ # I.e. remove transparent pixels.\n+ # Note: This needs to be checked per channel, rather than\n+ # taking an average over all channels for a given pixel.\n+ OD_thresh = OD[np.all(OD >= beta, 1), :] # shape (K, C)\n+\n+ # Calculate eigenvectors.\n+ # Note: We can either use eigenvector decomposition, or SVD.\n+ #eigvals, eigvecs = np.linalg.eig(np.cov(OD_thresh.T)) # np.cov results in inf/nans\n+ U, s, V = np.linalg.svd(OD_thresh, full_matrices=False)\n+\n+ # Extract two largest eigenvectors.\n+ # Note: We swap the sign of the eigvecs here to be consistent\n+ # with other implementations. Both +/- eigvecs are valid, with\n+ # the same eigenvalue, so this is okay.\n+ #top_eigvecs = eigvecs[:, np.argsort(eigvals)[-2:]] * -1\n+ top_eigvecs = V[0:2, :].T * -1 # shape (C, 2)\n+\n+ # Project thresholded optical density values onto plane spanned by\n+ # 2 largest eigenvectors.\n+ proj = np.dot(OD_thresh, top_eigvecs) # shape (K, 2)\n+\n+ # Calculate angle of each point wrt the first plane direction.\n+ # Note: the parameters are `np.arctan2(y, x)`\n+ angles = np.arctan2(proj[:, 1], proj[:, 0]) # shape (K,)\n+\n+ # Find robust extremes (a and 100-a percentiles) of the angle.\n+ min_angle = np.percentile(angles, alpha)\n+ max_angle = np.percentile(angles, 100-alpha)\n+\n+ # Convert min/max vectors (extremes) back to optimal stains in OD space.\n+ # This computes a set of axes for each angle onto which we can project\n+ # the top eigenvectors. This assumes that the projected values have\n+ # been normalized to unit length.\n+ extreme_angles = np.array(\n+ [[np.cos(min_angle), np.cos(max_angle)],\n+ [np.sin(min_angle), np.sin(max_angle)]]\n+ ) # shape (2,2)\n+ stains = np.dot(top_eigvecs, extreme_angles) # shape (C, 2)\n+\n+ # Merge vectors with hematoxylin first, and eosin second, as a heuristic.\n+ if stains[0, 0] < stains[0, 1]:\n+ stains[:, [0, 1]] = stains[:, [1, 0]] # swap columns\n+\n+ # Calculate saturations of each stain.\n+ # Note: Here, we solve\n+ # OD = VS\n+ # S = V^{-1}OD\n+ # where `OD` is the matrix of optical density values of our image,\n+ # `V` is the matrix of stain vectors, and `S` is the matrix of stain\n+ # saturations. 
Since this is an overdetermined system, we use the\n+ # least squares solver, rather than a direct solve.\n+ sats, _, _, _ = np.linalg.lstsq(stains, OD.T)\n+\n+ # Normalize stain saturations to have same pseudo-maximum based on\n+ # a reference max saturation.\n+ max_sat = np.percentile(sats, 99, axis=1, keepdims=True)\n+ sats = sats / max_sat * max_sat_ref\n+\n+ # Compute optimal OD values.\n+ OD_norm = np.dot(stain_ref, sats)\n+\n+ # Recreate image.\n+ # Note: If the image is immediately converted to uint8 with `.astype(np.uint8)`, it will\n+ # not return the correct values due to the initital values being outside of [0,255].\n+ # To fix this, we round to the nearest integer, and then clip to [0,255], which is the\n+ # same behavior as Matlab.\n+ #x_norm = np.exp(OD_norm) * light_intensity # natural log approach\n+ x_norm = 10**(-OD_norm) * light_intensity - 1e-8 # log10 approach\n+ x_norm = np.clip(np.round(x_norm), 0, 255).astype(np.uint8)\n+ x_norm = x_norm.astype(np.uint8)\n+ x_norm = x_norm.T.reshape(h,w,c)\n+ return (slide_num, x_norm)\n+\n+\n+def flatten_sample(sample_tuple):\n+ \"\"\"\n+ Flatten a (H,W,C) sample into a (C*H*W) row vector.\n+\n+ Transpose each sample from (H, W, channels) to (channels, H, W), then\n+ flatten each into a vector of length channels*H*W.\n+\n+ Args:\n+ sample_tuple: A (slide_num, sample) tuple, where slide_num is an\n+ integer, and sample is a 3D NumPy array of shape (H,W,C).\n+\n+ Returns:\n+ A (slide_num, sample) tuple, where the sample has been transposed\n+ from (H,W,C) to (C,H,W), and flattened to a vector of length\n+ (C*H*W).\n+ \"\"\"\n+ slide_num, sample = sample_tuple\n+ # 1. Swap axes from (sample_size_x, sample_size_y, ch) to\n+ # (ch, sample_size_x, sample_size_y).\n+ # 2. Flatten sample into (ch*sample_size_x*sample_size_y).\n+ flattened_sample = sample.transpose(2,0,1).reshape(-1)\n+ return (slide_num, flattened_sample)\n+\n+\n# Get Ground Truth Labels\ndef get_labels_df(folder):\n@@ -369,7 +508,8 @@ def get_labels_df(folder):\n# Process All Slides Into A Spark DataFrame\ndef preprocess(spark, slide_nums, folder=\"data\", training=True, tile_size=1024, overlap=0,\n- tissue_threshold=0.9, sample_size=256, grayscale=False, num_partitions=20000):\n+ tissue_threshold=0.9, sample_size=256, grayscale=False, normalize_stains=True,\n+ num_partitions=20000):\n\"\"\"\nPreprocess a set of whole-slide images.\n@@ -399,6 +539,7 @@ def preprocess(spark, slide_nums, folder=\"data\", training=True, tile_size=1024,\ngenerated.\ngrayscale: Whether or not to generate grayscale samples, rather\nthan RGB.\n+ normalize_stains: Whether or not to apply stain normalization.\nnum_partitions: Number of partitions to use during processing.\nReturns:\n@@ -418,17 +559,16 @@ def preprocess(spark, slide_nums, folder=\"data\", training=True, tile_size=1024,\n#row_mb = tile_size * tile_size * channels * 8 / 1024 / 1024 # size of one row in MB\n#rows_per_part = round(part_size / row_mb)\n#num_parts = rows / rows_per_part\n- ## HACK: Force even partitioning by collecting and parallelizing -- for memory issues.\n- ## Note: This was a PySpark bug with a fix in the master branch now.\n- #tile_indices = tile_indices.collect()\n- #tile_indices = sc.parallelize(tile_indices, num_partitions)\n- ## END HACK\ntile_indices = tile_indices.repartition(num_partitions)\ntile_indices.cache()\n- # Extract all tiles into a DataFrame, filter, and cut into smaller samples.\n+ # Extract all tiles into a DataFrame, filter, cut into smaller samples, apply stain\n+ # normalization, and 
flatten.\ntiles = tile_indices.map(lambda tile_index: process_tile_index(tile_index, folder, training))\nfiltered_tiles = tiles.filter(lambda tile: keep_tile(tile, tile_size, tissue_threshold))\nsamples = filtered_tiles.flatMap(lambda tile: process_tile(tile, sample_size, grayscale))\n+ if normalize_stains:\n+ samples = samples.map(lambda sample: normalize_staining(sample))\n+ samples = samples.map(lambda sample: flatten_sample(sample))\nif training:\n# Append labels\nlabels_df = get_labels_df(folder)\n@@ -441,7 +581,6 @@ def preprocess(spark, slide_nums, folder=\"data\", training=True, tile_size=1024,\nelse: # testing data -- no labels\ndf = samples.toDF([\"slide_num\", \"sample\"])\ndf = df.select(df.slide_num.astype(\"int\"), df[\"sample\"])\n- #df = df.repartition(num_partitions) # HACK: Even out the partitions to avoid saving issues\nreturn df\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1465] Add stain normalization to breast cancer preprocessing.
Adding stain normalization of H&E histology slides for the breast cancer
project.
Closes #507. |
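For readers skimming the patch above, a minimal NumPy sketch of the stain-separation core it adds (SVD on optical density, then angle-based selection of extreme stain vectors). The log10 convention, `light_intensity=255`, and the `alpha`/`beta` defaults follow the diff; the function name and the random demo input are illustrative only:

```python
import numpy as np

def stain_vectors_sketch(x, light_intensity=255, alpha=1, beta=0.15):
    # x: flattened RGB pixels as floats, shape (H*W, 3)
    od = -np.log10(x / light_intensity + 1e-8)        # optical density, (N, 3)
    od_thresh = od[np.all(od >= beta, axis=1), :]     # drop transparent pixels
    _, _, v = np.linalg.svd(od_thresh, full_matrices=False)
    top = v[0:2, :].T * -1                            # two top eigenvectors, (3, 2)
    proj = od_thresh.dot(top)                         # project onto stain plane, (K, 2)
    angles = np.arctan2(proj[:, 1], proj[:, 0])
    lo = np.percentile(angles, alpha)                 # robust angle extremes
    hi = np.percentile(angles, 100 - alpha)
    extremes = np.array([[np.cos(lo), np.cos(hi)],
                         [np.sin(lo), np.sin(hi)]])   # (2, 2)
    stains = top.dot(extremes)                        # optimal stain vectors, (3, 2)
    # Saturations via least squares, since OD = VS is overdetermined in pixels.
    sats, _, _, _ = np.linalg.lstsq(stains, od.T, rcond=None)
    return stains, sats

np.random.seed(7)
x = np.random.uniform(1, 255, size=(1000, 3))         # illustrative "pixels"
stains, sats = stain_vectors_sketch(x)
print(stains.shape, sats.shape)                       # (3, 2) (2, 1000)
```

From here, the patch rescales the saturations against the reference maximum saturations and maps back to RGB via `10**(-OD_norm)`, as shown in the diff.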
49,768 | 23.05.2017 11:08:36 | 25,200 | 16ccfa0571d5a69c2e22ea2c6c1463102849d59c | Update notebook samples with latest code
Updated a few sample notebooks to support both Python 2.x and Python 3.x | [
{
"change_type": "MODIFY",
"old_path": "samples/jupyter-notebooks/Autoencoder.ipynb",
"new_path": "samples/jupyter-notebooks/Autoencoder.ipynb",
"diff": "{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n\"outputs\": [],\n\"source\": [\n\"!pip show systemml\"\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n\"outputs\": [],\n\"source\": [\n\"import pandas as pd\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n\"outputs\": [],\n\"source\": [\n- \"X_pd = pd.DataFrame(range(1, 2001,1),dtype=float).values.reshape(100,20)\\n\",\n+ \"import numpy as np\\n\",\n+ \"X_pd = pd.DataFrame(np.arange(1,2001, dtype=np.float)).values.reshape(100,20)\\n\",\n+ \"# X_pd = pd.DataFrame(range(1, 2001,1),dtype=float).values.reshape(100,20)\\n\",\n\"script =\\\"\\\"\\\"\\n\",\n\" write(X, $Xfile)\\n\",\n\"\\\"\\\"\\\"\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n\"outputs\": [],\n\"source\": [\n\"!ls -l /tmp/data/Input\"\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n\"outputs\": [],\n\"source\": [\n\"autoencoderURL = \\\"https://raw.githubusercontent.com/apache/incubator-systemml/master/scripts/staging/autoencoder-2layer.dml\\\"\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n\"outputs\": [],\n\"source\": [\n\"!ls -l /tmp/data/Output\"\n\"cell_type\": \"code\",\n\"execution_count\": null,\n\"metadata\": {\n+ \"collapsed\": true,\n\"scrolled\": true\n},\n\"outputs\": [],\n\"name\": \"python\",\n\"nbconvert_exporter\": \"python\",\n\"pygments_lexer\": \"ipython2\",\n- \"version\": \"2.7.11\"\n+ \"version\": \"2.7.13\"\n}\n},\n\"nbformat\": 4,\n"
},
{
"change_type": "MODIFY",
"old_path": "samples/jupyter-notebooks/Deep_Learning_Image_Classification.ipynb",
"new_path": "samples/jupyter-notebooks/Deep_Learning_Image_Classification.ipynb",
"diff": "\"\\n\",\n\"ml = MLContext(sc)\\n\",\n\"\\n\",\n- \"print \\\"Spark Version:\\\", sc.version\\n\",\n- \"print \\\"SystemML Version:\\\", ml.version()\\n\",\n- \"print \\\"SystemML Built-Time:\\\", ml.buildTime()\"\n+ \"print (\\\"Spark Version:\\\" + sc.version)\\n\",\n+ \"print (\\\"SystemML Version:\\\" + ml.version())\\n\",\n+ \"print (\\\"SystemML Built-Time:\\\" + ml.buildTime())\"\n]\n},\n{\n\"source\": [\n\"mnist = datasets.fetch_mldata(\\\"MNIST Original\\\")\\n\",\n\"\\n\",\n- \"print \\\"Mnist data features:\\\", mnist.data.shape\\n\",\n- \"print \\\"Mnist data label:\\\", mnist.target.shape\\n\",\n+ \"print (\\\"Mnist data features:\\\" + str(mnist.data.shape))\\n\",\n+ \"print (\\\"Mnist data label:\\\" + str(mnist.target.shape))\\n\",\n\"\\n\",\n\"trainX, testX, trainY, testY = train_test_split(mnist.data, mnist.target.astype(\\\"int0\\\"), test_size = 0.142857)\\n\",\n\"\\n\",\n\"trainD = np.concatenate((trainY.reshape(trainY.size, 1), trainX),axis=1)\\n\",\n\"testD = np.concatenate((testY.reshape (testY.size, 1), testX),axis=1)\\n\",\n\"\\n\",\n- \"print \\\"Images for training:\\\", trainD.shape\\n\",\n- \"print \\\"Images used for testing:\\\", testD.shape\\n\",\n+ \"print (\\\"Images for training:\\\" + str(trainD.shape))\\n\",\n+ \"print (\\\"Images used for testing:\\\" + str(testD.shape))\\n\",\n\"pix = int(np.sqrt(trainD.shape[1]))\\n\",\n- \"print \\\"Each image is:\\\", pix, \\\"by\\\", pix, \\\"pixels\\\"\\n\",\n+ \"print (\\\"Each image is: \\\" + str(pix) + \\\" by \\\" + str(pix) + \\\" pixels\\\")\\n\",\n\"\\n\",\n\"np.savetxt('data/mnist/mnist_train.csv', trainD, fmt='%u', delimiter=\\\",\\\")\\n\",\n\"np.savetxt('data/mnist/mnist_test.csv', testD, fmt='%u', delimiter=\\\",\\\")\"\n\"trainData = np.genfromtxt('data/mnist/mnist_train.csv', delimiter=\\\",\\\")\\n\",\n\"testData = np.genfromtxt('data/mnist/mnist_test.csv', delimiter=\\\",\\\")\\n\",\n\"\\n\",\n- \"print \\\"Training data: \\\", trainData.shape\\n\",\n- \"print \\\"Test data: \\\", testData.shape\"\n+ \"print (\\\"Training data: \\\" + str(trainData.shape))\\n\",\n+ \"print (\\\"Test data: \\\" + str(testData.shape))\"\n]\n},\n{\n\"\\n\",\n\"predictions = ml.execute(script).get(\\\"predictions\\\").toNumPy()\\n\",\n\"\\n\",\n- \"print classification_report(testData[:,0], predictions)\"\n+ \"print (classification_report(testData[:,0], predictions))\"\n]\n},\n{\n},\n\"outputs\": [],\n\"source\": [\n- \"img_size = np.sqrt(testData.shape[1] - 1)\\n\",\n+ \"img_size = int(np.sqrt(testData.shape[1] - 1))\\n\",\n\"\\n\",\n\"def displayImage(i):\\n\",\n\" image = (testData[i,1:]).reshape((img_size, img_size)).astype(\\\"uint8\\\")\\n\",\n\"\\n\",\n\"p = predictImage(i)\\n\",\n\"\\n\",\n- \"print \\\"Image\\\", i, \\\"\\\\nPredicted digit:\\\", p, \\\"\\\\nActual digit: \\\", testData[i,0], \\\"\\\\nResult: \\\", (p == testData[i,0])\\n\",\n+ \"print (\\\"Image \\\" + str(i) + \\\"\\\\nPredicted digit: \\\" + str(p) + \\\"\\\\nActual digit: \\\" + str(testData[i,0]) + \\\"\\\\nResult: \\\" + str(p == testData[i,0]))\\n\",\n\"\\n\",\n+ \"p\\n\",\n\"displayImage(i)\"\n]\n},\n\"name\": \"python\",\n\"nbconvert_exporter\": \"python\",\n\"pygments_lexer\": \"ipython2\",\n- \"version\": \"2.7.11\"\n+ \"version\": \"2.7.13\"\n}\n},\n\"nbformat\": 4,\n"
},
{
"change_type": "MODIFY",
"old_path": "samples/jupyter-notebooks/Linear_Regression_Algorithms_Demo.ipynb",
"new_path": "samples/jupyter-notebooks/Linear_Regression_Algorithms_Demo.ipynb",
"diff": "\"\\n\",\n\"ml = MLContext(sc)\\n\",\n\"\\n\",\n- \"print \\\"Spark Version:\\\", sc.version\\n\",\n- \"print \\\"SystemML Version:\\\", ml.version()\\n\",\n- \"print \\\"SystemML Built-Time:\\\", ml.buildTime()\"\n+ \"print (\\\"Spark Version:\\\" + sc.version)\\n\",\n+ \"print (\\\"SystemML Version:\\\" + ml.version())\\n\",\n+ \"print (\\\"SystemML Built-Time:\\\"+ ml.buildTime())\"\n]\n},\n{\n\"source\": [\n\"prog = dml(script).input('$nr', 1e5).output('s')\\n\",\n\"s = ml.execute(prog).get('s')\\n\",\n- \"print s\"\n+ \"print (s)\"\n]\n},\n{\n\"source\": [\n\"prog = dml(script).input('$nr', 1e6).output('s')\\n\",\n\"out = ml.execute(prog).get('s')\\n\",\n- \"print out\"\n+ \"print (out)\"\n]\n},\n{\n\"w = ml.execute(prog).get('beta_out')\\n\",\n\"w = w.toNumPy()\\n\",\n\"bias=w[1]\\n\",\n- \"print bias\"\n+ \"print (bias)\"\n]\n},\n{\n\"name\": \"python\",\n\"nbconvert_exporter\": \"python\",\n\"pygments_lexer\": \"ipython2\",\n- \"version\": \"2.7.11\"\n+ \"version\": \"2.7.13\"\n}\n},\n\"nbformat\": 4,\n"
},
{
"change_type": "MODIFY",
"old_path": "samples/jupyter-notebooks/SystemML-PySpark-Recommendation-Demo.ipynb",
"new_path": "samples/jupyter-notebooks/SystemML-PySpark-Recommendation-Demo.ipynb",
"diff": "{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n\"outputs\": [],\n\"source\": [\n\"!pip show systemml\"\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n\"outputs\": [],\n\"source\": [\n\"%%sh\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n\"outputs\": [],\n\"source\": [\n\"# Load data\\n\",\n{\n\"cell_type\": \"code\",\n\"execution_count\": null,\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"collapsed\": true\n+ },\n\"outputs\": [],\n\"source\": [\n\"# Plot training loss over time\\n\",\n\"name\": \"python\",\n\"nbconvert_exporter\": \"python\",\n\"pygments_lexer\": \"ipython2\",\n- \"version\": \"2.7.11\"\n+ \"version\": \"2.7.13\"\n}\n},\n\"nbformat\": 4,\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1606] Update notebook samples with latest code
Updated a few sample notebooks to support both Python 2.x and Python 3.x |
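The notebook edits above all follow one mechanical pattern: replace Python 2 print statements with print() calls that concatenate explicitly stringified values. A short sketch of that portable style (the version string is an illustrative stand-in):

```python
from __future__ import print_function  # no-op on Python 3, enables print() on Python 2

version = "0.15.0"  # illustrative value
# Python 2-only statement form, as previously used in the notebooks:
#   print "SystemML Version:", version
# Function-call form that runs on both Python 2.x and 3.x:
print("SystemML Version: " + str(version))
```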
49,738 | 24.05.2017 09:51:04 | 25,200 | eea72ce577d96fc00d5548df45672b1eb9360a9f | [HOTFIX][SYSTEMML-1621] Fix value type inference log (int/int-double) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java",
"new_path": "src/main/java/org/apache/sysml/parser/BuiltinFunctionExpression.java",
"diff": "@@ -1173,7 +1173,7 @@ public class BuiltinFunctionExpression extends DataIdentifier\nbreak;\n}\ndefault:\n- if (this.isMathFunction()) {\n+ if( isMathFunction() ) {\ncheckMathFunctionParam();\n//unary operations\nif( getSecondExpr() == null ) {\n@@ -1186,6 +1186,9 @@ public class BuiltinFunctionExpression extends DataIdentifier\n//binary operations\nelse {\nsetBinaryOutputProperties(output);\n+ // override computed value type for special cases\n+ if( getOpCode() == BuiltinFunctionOp.LOG )\n+ output.setValueType(ValueType.DOUBLE);\n}\n}\nelse {\n"
}
] | Java | Apache License 2.0 | apache/systemds | [HOTFIX][SYSTEMML-1621] Fix value type inference log (int/int-double) |
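The rationale for pinning the output value type to DOUBLE is easy to reproduce outside DML: a logarithm of one integer with an integer base is not integral in general. A plain-Python illustration of that point, not SystemML code:

```python
import math

print(math.log(8, 2))   # 3.0, integral by coincidence, but already a float
print(math.log(10, 2))  # 3.3219..., the general case the fix guards against
```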
49,738 | 25.05.2017 14:36:38 | 25,200 | c697c30ebbfb24872f379523793a2c121553bb1c | [MINOR] Fix codegen row template construction (input ordering w/ vma) | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java",
"new_path": "src/main/java/org/apache/sysml/hops/codegen/template/TemplateRow.java",
"diff": "@@ -200,6 +200,7 @@ public class TemplateRow extends TemplateBase\ninHops.add(hop.getInput().get(0).getInput().get(0));\nout = new CNodeBinary(cdata1, cdata2, BinType.VECT_MULT_ADD);\n+ inHops2.put(\"X\", hop.getInput().get(0).getInput().get(0));\n}\nelse\n{\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fix codegen row template construction (input ordering w/ vma) |
49,737 | 25.05.2017 23:00:52 | 25,200 | 0a89676fa67a891bdcc87bc526071e53a35a2d87 | Kmeans isY and verb parameters should be boolean
Change Kmeans.dml isY and verb parameters to boolean.
Closes | [
{
"change_type": "MODIFY",
"old_path": "scripts/algorithms/Kmeans.dml",
"new_path": "scripts/algorithms/Kmeans.dml",
"diff": "# tol Double 0.000001 Tolerance (epsilon) for WCSS change ratio\n# samp Int 50 Average number of records per centroid in data samples\n# C String \"C.mtx\" Location to store the output matrix with the centroids\n-# isY Int 0 0 = do not write Y, 1 = write Y\n+# isY Boolean FALSE do not write Y\n# Y String \"Y.mtx\" Location to store the mapping of records to centroids\n# fmt String \"text\" Matrix output format, usually \"text\" or \"csv\"\n-# verb Int 0 0 = do not print per-iteration stats, 1 = print them\n+# verb Boolean FALSE do not print per-iteration stats\n# ----------------------------------------------------------------------------\n#\n# Example:\n# hadoop jar SystemML.jar -f Kmeans.dml -nvargs X=X.mtx k=5 C=centroids.mtx\n-# hadoop jar SystemML.jar -f Kmeans.dml -nvargs X=X.mtx k=5 runs=100 maxi=5000 tol=0.00000001 samp=20 C=centroids.mtx isY=1 Y=clusters.mtx verb=1\n+# hadoop jar SystemML.jar -f Kmeans.dml -nvargs X=X.mtx k=5 runs=100 maxi=5000 tol=0.00000001 samp=20 C=centroids.mtx isY=TRUE Y=clusters.mtx verb=TRUE\nfileX = $X;\nfileY = ifdef ($Y, \"Y.mtx\");\n@@ -51,8 +51,8 @@ num_centroids = $k;\nnum_runs = ifdef ($runs, 10); # $runs=10;\nmax_iter = ifdef ($maxi, 1000); # $maxi=1000;\neps = ifdef ($tol, 0.000001); # $tol=0.000001;\n-is_write_Y = ifdef ($isY, 0); # $isY=0;\n-is_verbose = ifdef ($verb, 0); # $verb=0;\n+is_write_Y = ifdef ($isY, FALSE); # $isY=FALSE;\n+is_verbose = ifdef ($verb, FALSE); # $verb=FALSE;\nfmtCY = ifdef ($fmt, \"text\"); # $fmt=\"text\";\navg_sample_size_per_centroid = ifdef ($samp, 50); # $samp=50;\n@@ -149,7 +149,7 @@ parfor (run_index in 1 : num_runs, check = 0)\n# Compute the current centroid-based within-cluster sum of squares (WCSS)\nwcss_old = wcss;\nwcss = sumXsq + sum (minD);\n- if (is_verbose == 1) {\n+ if (is_verbose == TRUE) {\nif (iter_count == 0) {\nprint (\"Run \" + run_index + \", At Start-Up: Centroid WCSS = \" + wcss);\n} else {\n@@ -208,7 +208,7 @@ if (num_successful_runs > 0) {\nC = All_Centroids [(num_centroids * (best_index - 1) + 1) : (num_centroids * best_index), ];\nprint (\"Writing out the best-WCSS centroids...\");\nwrite (C, fileC, format=fmtCY);\n- if (is_write_Y == 1) {\n+ if (is_write_Y == TRUE) {\nprint (\"Writing out the best-WCSS cluster labels...\");\nD = -2 * (X %*% t(C)) + t(rowSums (C ^ 2));\nP = (D <= rowMins (D));\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1380] Kmeans isY and verb parameters should be boolean
Change Kmeans.dml isY and verb parameters to boolean.
Closes #516. |
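A hedged sketch of driving the updated script from the Python MLContext API instead of the hadoop CLI shown in the script header. It assumes an existing SparkContext `sc`, a `dmlFromFile` helper in the systemml package, and that Python booleans bind to the new TRUE/FALSE-valued parameters; all file paths are illustrative:

```python
from systemml import MLContext, dmlFromFile

ml = MLContext(sc)
script = (dmlFromFile("scripts/algorithms/Kmeans.dml")
          .input("$X", "X.mtx").input("$k", 5)
          .input("$C", "centroids.mtx")
          .input("$isY", True).input("$Y", "clusters.mtx")
          .input("$verb", True))  # booleans instead of the old 0/1 flags
ml.execute(script)
```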
49,736 | 26.05.2017 09:56:02 | 25,200 | fd41f0253810a8d0ea62e4fb21854229dfe526ef | Remove confusing warning for disabled native BLAS | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java",
"new_path": "src/main/java/org/apache/sysml/utils/NativeHelper.java",
"diff": "@@ -145,7 +145,7 @@ public class NativeHelper {\nLOG.warn(\"Time to load native blas: \" + timeToLoadInMilliseconds + \" milliseconds.\");\n}\nelse {\n- LOG.warn(\"Using internal Java BLAS as native BLAS support the configuration 'native.blas'=\" + userSpecifiedBLAS + \".\");\n+ LOG.debug(\"Using internal Java BLAS as native BLAS support the configuration 'native.blas'=\" + userSpecifiedBLAS + \".\");\n}\nattemptedLoading = true;\n}\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1630] Remove confusing warning for disabled native BLAS |
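For context, the code path above is controlled by the `native.blas` property, which users typically set through MLContext. A minimal sketch assuming an existing SparkContext `sc`; the property name matches the `setConfigProperty` call shown in the Caffe2DML guide edited in the next commit:

```python
from systemml import MLContext

ml = MLContext(sc)
# "auto" probes for MKL/OpenBLAS and quietly falls back to the internal
# Java BLAS, which is the fallback case this commit demotes to debug logging.
ml.setConfigProperty("native.blas", "auto")
```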
49,736 | 26.05.2017 12:25:36 | 25,200 | 3a1431c8486898c03bf2be5e18f33b4e35edad84 | [MINOR] Updated the documentation for mllearn and python dsl
Added design document for mllearn in BaseSystemMLClassifier class
Add Python DSL documentation
Updated Native backend documentation wrt MKL DNN
Caffe2DML documentation: minor updates | [
{
"change_type": "MODIFY",
"old_path": "docs/beginners-guide-caffe2dml.md",
"new_path": "docs/beginners-guide-caffe2dml.md",
"diff": "@@ -95,7 +95,7 @@ lenet.setStatistics(True).setExplain(True)\n# If you want to force GPU execution. Please make sure the required dependency are available.\n# lenet.setGPU(True).setForceGPU(True)\n-# (Optional but recommended) Enable native BLAS. For more detail see http://apache.github.io/incubator-systemml/native-backend\n+# (Optional but recommended) Enable native BLAS.\nlenet.setConfigProperty(\"native.blas\", \"auto\")\n# In case you want to enable experimental feature such as codegen\n@@ -106,6 +106,8 @@ lenet.fit(X_train, y_train)\nlenet.predict(X_test)\n```\n+For more detail on enabling native BLAS, please see the documentation for the [native backend](http://apache.github.io/incubator-systemml/native-backend).\n+\n## Frequently asked questions\n#### How can I speedup the training with Caffe2DML ?\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/native-backend.md",
"new_path": "docs/native-backend.md",
"diff": "@@ -74,19 +74,15 @@ sudo make install\n# After installation, you may also want to add `/opt/OpenBLAS/lib` to your LD_LIBRARY_PATH or `java.library.path`.\n```\n-You can check if the OpenBLAS on you system is compiled with OpenMP or not using following commands:\n+We also depend on GNU OpenMP (gomp) which will be installed by GCC.\n+To find the location of `gomp` on your system, please use the command `ldconfig -p | grep libgomp`.\n+If gomp is available as `/lib64/libgomp.so.1` instead of `/lib64/libgomp.so`,\n+please add a softlink to it:\n```bash\n-$ ldconfig -p | grep libopenblas.so\n-libopenblas.so (libc6,x86-64) => /opt/OpenBLAS/lib/libopenblas.so\n-$ ldd /opt/OpenBLAS/lib/libopenblas.so | grep libgomp\n-libgomp.so.1 => /lib64/libgomp.so.1\n+sudo ln -s /lib64/libgomp.so.1 /lib64/libgomp.so\n```\n-If you don't see any output after the second command, then OpenBLAS installed on your system is using its internal threading.\n-In this case, we highly recommend that you reinstall OpenBLAS using the above commands.\n-\n-\n## Step 2: Install other dependencies\n```bash\n@@ -96,15 +92,6 @@ sudo yum install gcc-c++\nsudo apt-get install g++\n```\n-We also depend on GNU OpenMP (gomp) which will be installed by GCC.\n-To find the location of `gomp` on your system, please use the command `ldconfig -p | grep libgomp`.\n-If gomp is available as `/lib64/libgomp.so.1` instead of `/lib64/libgomp.so`,\n-please add a softlink to it:\n-\n-```bash\n-sudo ln -s /lib64/libgomp.so.1 /lib64/libgomp.so\n-```\n-\n## Step 3: Provide the location of the native libraries\n1. Pass the location of the native libraries using command-line options:\n@@ -117,39 +104,63 @@ to the environment variable `LD_LIBRARY_PATH` (on Linux).\nIf you want to use SystemML with Spark, please add the following line to `spark-env.sh`\n(or to the bash profile).\n- ```bash\nexport LD_LIBRARY_PATH=/path/to/blas-n-other-dependencies\n- ```\n## Common issues on Linux\n-1. Unable to load `gomp`\n+- Unable to load `gomp`.\nFirst make sure if gomp is available on your system.\n- ```bash\nldconfig -p | grep libgomp\n- ```\nIf the above command returns no results, then you may have to install `gcc`.\nOn the other hand, if the above command only returns libgomp with major suffix (such as `so.1`),\nthen please execute the below command:\n- ```bash\nsudo ln -s /lib64/libgomp.so.1 /usr/lib64/libgomp.so\n- ```\n-2. Unable to load `mkl_rt`\n+- Unable to load `mkl_rt`.\nBy default, Intel MKL libraries will be installed in the location `/opt/intel/mkl/lib/intel64/`.\nMake sure that this path is accessible to Java as per instructions provided in the above section.\n-3. 
Unable to load `openblas`\n+- Unable to load `openblas`.\nBy default, OpenBLAS libraries will be installed in the location `/opt/OpenBLAS/lib/`.\nMake sure that this path is accessible to Java as per instructions provided in the above section.\n+- Using OpenBLAS without OpenMP can lead to performance degradation when using SystemML.\n+\n+You can check if the OpenBLAS on you system is compiled with OpenMP or not using following commands:\n+If you don't see any output after the second command, then OpenBLAS installed on your system is using its internal threading.\n+In this case, we highly recommend that you reinstall OpenBLAS using the above commands.\n+\n+ $ ldconfig -p | grep libopenblas.so\n+ libopenblas.so (libc6,x86-64) => /opt/OpenBLAS/lib/libopenblas.so\n+ $ ldd /opt/OpenBLAS/lib/libopenblas.so | grep libgomp\n+ libgomp.so.1 => /lib64/libgomp.so.1\n+\n+- Using MKL can lead to slow performance for convolution instruction.\n+\n+We noticed that double-precision MKL DNN primitives for convolution instruction\n+is considerably slower than than the corresponding single-precision MKL DNN primitives\n+as of MKL 2017 Update 1. We anticipate that this performance bug will be fixed in the future MKL versions.\n+Until then or until SystemML supports single-precision matrices, we recommend that you use OpenBLAS when using script with `conv2d`.\n+\n+Here are the runtime performance in seconds of `conv2d` on 64 images of size 256 X 256 with sparsity 0.9\n+and 32 filter of size 5x5 with stride = [1,1] and pad=[1,1].\n+\n+\n+| | MKL | OpenBLAS |\n+|-------------------------------|--------|----------|\n+| Single-precision, channels=3 | 5.144 | 7.918 |\n+| Double-precision, channels=3 | 12.599 | 8.688 |\n+| Single-precision, channels=32 | 10.765 | 21.963 |\n+| Double-precision, channels=32 | 71.118 | 34.881 |\n+\n+\n# Developer Guide\nThis section describes how to compile shared libraries in the folder `src/main/cpp/lib`.\n@@ -176,16 +187,13 @@ For this project, I typically make a directory in the `cpp` folder (this folder)\n3. Install cmake\n- ```bash\n# Centos/RedHat\nsudo yum install cmake3\n# Ubuntu\nsudo apt-get install cmake\n- ```\n4. Compile the libs using the below script.\n- ```bash\nmkdir INTEL && cd INTEL\ncmake -DUSE_INTEL_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ -DCMAKE_CXX_FLAGS=\"-DUSE_GNU_THREADING -m64\" ..\nmake install\n@@ -196,7 +204,7 @@ For this project, I typically make a directory in the `cpp` folder (this folder)\ncd ..\n# The below script helps maintain this document as well as avoid accidental inclusion of non-standard dependencies.\n./check-dependency-linux-x86_64.sh\n- ```\n+\nThe generated library files are placed in src/main/cpp/lib. This location can be changed from the CMakeLists.txt file.\n@@ -211,3 +219,4 @@ The current set of dependencies other than MKL and OpenBLAS, are as follows:\n- Additional OpenBLAS dependencies: Fortran runtime (`libgfortran.so.3`) and GCC `__float128` shared support library (`libquadmath.so.0`)\nIf CMake cannot detect your OpenBLAS installation, set the `OpenBLAS_HOME` environment variable to the OpenBLAS Home.\n+\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/python-reference.md",
"new_path": "docs/python-reference.md",
"diff": "@@ -40,16 +40,16 @@ To understand more about DML and PyDML, we recommend that you read [Beginner's G\nFor convenience of Python users, SystemML exposes several language-level APIs that allow Python users to use SystemML\nand its algorithms without the need to know DML or PyDML. We explain these APIs in the below sections.\n-## matrix API\n+## matrix class\n-The matrix class allows users to perform linear algebra operations in SystemML using a NumPy-like interface.\n-This class supports several arithmetic operators (such as +, -, *, /, ^, etc).\n-\n-matrix class is a python wrapper that implements basic matrix\n-operators, matrix functions as well as converters to common Python\n+The matrix class is an **experimental** feature that is often referred to as Python DSL.\n+It allows the user to perform linear algebra operations in SystemML using a NumPy-like interface.\n+It implements basic matrix operators, matrix functions as well as converters to common Python\ntypes (for example: Numpy arrays, PySpark DataFrame and Pandas\nDataFrame).\n+### Operators\n+\nThe operators supported are:\n1. Arithmetic operators: +, -, *, /, //, %, \\** as well as dot\n@@ -57,51 +57,24 @@ The operators supported are:\n2. Indexing in the matrix\n3. Relational/Boolean operators: \\<, \\<=, \\>, \\>=, ==, !=, &, \\|\n-In addition, following functions are supported for matrix:\n-\n-1. transpose\n-2. Aggregation functions: sum, mean, var, sd, max, min, argmin,\n- argmax, cumsum\n-3. Global statistical built-In functions: exp, log, abs, sqrt,\n- round, floor, ceil, sin, cos, tan, asin, acos, atan, sign, solve\n-\n-For all the above functions, we always return a two dimensional matrix, especially for aggregation functions with axis.\n-For example: Assuming m1 is a matrix of (3, n), NumPy returns a 1d vector of dimension (3,) for operation m1.sum(axis=1)\n-whereas SystemML returns a 2d matrix of dimension (3, 1).\n-\n-Note: an evaluated matrix contains a data field computed by eval\n-method as DataFrame or NumPy array.\n-\n-It is important to note that matrix class also supports most of NumPy's universal functions (i.e. 
ufuncs).\n-The current version of NumPy explicitly disables overriding ufunc, but this should be enabled in next release.\n-Until then to test above code, please use:\n-\n-```bash\n-git clone https://github.com/niketanpansare/numpy.git\n-cd numpy\n-python setup.py install\n-```\n+This class also supports several input/output formats such as NumPy arrays, Pandas DataFrame, SciPy sparse matrix and PySpark DataFrame.\n-This will enable NumPy's functions to invoke matrix class:\n+Here is a small example that demonstrates the usage:\n```python\n-import systemml as sml\n-import numpy as np\n-m1 = sml.matrix(np.ones((3,3)) + 2)\n-m2 = sml.matrix(np.ones((3,3)) + 3)\n-np.add(m1, m2)\n+>>> import systemml as sml\n+>>> import numpy as np\n+>>> m1 = sml.matrix(np.ones((3,3)) + 2)\n+>>> m2 = sml.matrix(np.ones((3,3)) + 3)\n+>>> m2 = m1 * (m2 + m1)\n+>>> m4 = 1.0 - m2\n+>>> m4.sum(axis=1).toNumPy()\n+array([[-60.],\n+ [-60.],\n+ [-60.]])\n```\n-The matrix class doesnot support following ufuncs:\n-\n-- Complex number related ufunc (for example: `conj`)\n-- Hyperbolic/inverse-hyperbolic functions (for example: sinh, arcsinh, cosh, ...)\n-- Bitwise operators\n-- Xor operator\n-- Infinite/Nan-checking (for example: isreal, iscomplex, isfinite, isinf, isnan)\n-- Other ufuncs: copysign, nextafter, modf, frexp, trunc.\n-\n-This class also supports several input/output formats such as NumPy arrays, Pandas DataFrame, SciPy sparse matrix and PySpark DataFrame.\n+### Lazy evaluation\nBy default, the operations are evaluated lazily to avoid conversion overhead and also to maximize optimization scope.\nTo disable lazy evaluation, please us `set_lazy` method:\n@@ -130,28 +103,123 @@ save(mVar4, \" \")\n# This matrix (mVar8) is backed by NumPy array. To fetch the NumPy array, invoke toNumPy() method.\n```\n-### Usage:\n+Since matrix is backed by lazy evaluation and uses a recursive Depth First Search (DFS),\n+you may run into `RuntimeError: maximum recursion depth exceeded`.\n+Please see below [troubleshooting steps](http://apache.github.io/incubator-systemml/python-reference#maximum-recursion-depth-exceeded)\n+\n+\n+### Built-in functions\n+\n+In addition to the above mentioned operators, following functions are supported.\n+\n+- transpose: Transposes the input matrix.\n+\n+- Aggregation functions: prod, sum, mean, var, sd, max, min, argmin, argmax, cumsum\n+\n+| | Description | Parameters |\n+|------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n+| prod(self) | Return the product of all cells in matrix | self: input matrix object |\n+| sum(self, axis=None) | Compute the sum along the specified axis | axis : int, optional |\n+| mean(self, axis=None) | Compute the arithmetic mean along the specified axis | axis : int, optional |\n+| var(self, axis=None) | Compute the variance along the specified axis. We assume that delta degree of freedom is 1 (unlike NumPy which assumes ddof=0). 
| axis : int, optional |\n+| moment(self, moment=1, axis=None) | Calculates the nth moment about the mean | moment : int (can be 1, 2, 3 or 4), axis : int, optional |\n+| sd(self, axis=None) | Compute the standard deviation along the specified axis | axis : int, optional |\n+| max(self, other=None, axis=None) | Compute the maximum value along the specified axis | other: matrix or numpy array (& other supported types) or scalar, axis : int, optional |\n+| min(self, other=None, axis=None) | Compute the minimum value along the specified axis | other: matrix or numpy array (& other supported types) or scalar, axis : int, optional |\n+| argmin(self, axis=None) | Returns the indices of the minimum values along an axis. | axis : int, optional,(only axis=1, i.e. rowIndexMax is supported in this version) |\n+| argmax(self, axis=None) | Returns the indices of the maximum values along an axis. | axis : int, optional (only axis=1, i.e. rowIndexMax is supported in this version) |\n+| cumsum(self, axis=None) | Returns the indices of the maximum values along an axis. | axis : int, optional (only axis=0, i.e. cumsum along the rows is supported in this version) |\n+\n+- Global statistical built-In functions: exp, log, abs, sqrt, round, floor, ceil, sin, cos, tan, asin, acos, atan, sign, solve\n+\n+| | Description | Parameters |\n+|------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n+| solve(A, b) | Computes the least squares solution for system of linear equations A %*% x = b | A, b: input matrices |\n+\n+\n+- Built-in sampling functions: normal, uniform, poisson\n+\n+| | Description | Parameters |\n+|------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n+| normal(loc=0.0, scale=1.0, size=(1,1), sparsity=1.0) | Draw random samples from a normal (Gaussian) distribution. | loc: Mean (\"centre\") of the distribution, scale: Standard deviation (spread or \"width\") of the distribution, size: Output shape (only tuple of length 2, i.e. (m, n), supported), sparsity: Sparsity (between 0.0 and 1.0). |\n+| uniform(low=0.0, high=1.0, size=(1,1), sparsity=1.0) | Draw samples from a uniform distribution. | low: Lower boundary of the output interval, high: Upper boundary of the output interval, size: Output shape (only tuple of length 2, i.e. (m, n), supported), sparsity: Sparsity (between 0.0 and 1.0). |\n+| poisson(lam=1.0, size=(1,1), sparsity=1.0) | Draw samples from a Poisson distribution. | lam: Expectation of interval, should be > 0, size: Output shape (only tuple of length 2, i.e. (m, n), supported), sparsity: Sparsity (between 0.0 and 1.0). 
|\n+\n+- Other builtin functions: hstack, vstack, trace\n+\n+| | Description | Parameters |\n+|------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n+| hstack(self, other) | Stack matrices horizontally (column wise). Invokes cbind internally. | self: lhs matrix object, other: rhs matrix object |\n+| vstack(self, other) | Stack matrices vertically (row wise). Invokes rbind internally. | self: lhs matrix object, other: rhs matrix object |\n+| trace(self) | Return the sum of the cells of the main diagonal square matrix | self: input matrix |\n+\n+Here is an example that uses the above functions and trains a simple linear regression model:\n+\n+```python\n+>>> import numpy as np\n+>>> from sklearn import datasets\n+>>> import systemml as sml\n+>>> # Load the diabetes dataset\n+>>> diabetes = datasets.load_diabetes()\n+>>> # Use only one feature\n+>>> diabetes_X = diabetes.data[:, np.newaxis, 2]\n+>>> # Split the data into training/testing sets\n+>>> X_train = diabetes_X[:-20]\n+>>> X_test = diabetes_X[-20:]\n+>>> # Split the targets into training/testing sets\n+>>> y_train = diabetes.target[:-20]\n+>>> y_test = diabetes.target[-20:]\n+>>> # Train Linear Regression model\n+>>> X = sml.matrix(X_train)\n+>>> y = sml.matrix(np.matrix(y_train).T)\n+>>> A = X.transpose().dot(X)\n+>>> b = X.transpose().dot(y)\n+>>> beta = sml.solve(A, b).toNumPy()\n+>>> y_predicted = X_test.dot(beta)\n+>>> print('Residual sum of squares: %.2f' % np.mean((y_predicted - y_test) ** 2))\n+Residual sum of squares: 25282.12\n+```\n+\n+For all the above functions, we always return a two dimensional matrix, especially for aggregation functions with axis.\n+For example: Assuming m1 is a matrix of (3, n), NumPy returns a 1d vector of dimension (3,) for operation m1.sum(axis=1)\n+whereas SystemML returns a 2d matrix of dimension (3, 1).\n+\n+Note: an evaluated matrix contains a data field computed by eval\n+method as DataFrame or NumPy array.\n+\n+### Support for NumPy's universal functions\n+\n+The matrix class also supports most of NumPy's universal functions (i.e. ufuncs).\n+The current version of NumPy explicitly disables overriding ufunc, but this should be enabled in next release.\n+Until then to test above code, please use:\n+\n+```bash\n+git clone https://github.com/niketanpansare/numpy.git\n+cd numpy\n+python setup.py install\n+```\n+\n+This will enable NumPy's functions to invoke matrix class:\n```python\nimport systemml as sml\nimport numpy as np\nm1 = sml.matrix(np.ones((3,3)) + 2)\nm2 = sml.matrix(np.ones((3,3)) + 3)\n-m2 = m1 * (m2 + m1)\n-m4 = 1.0 - m2\n-m4.sum(axis=1).toNumPy()\n+np.add(m1, m2)\n```\n-Output:\n+The matrix class doesnot support following ufuncs:\n-```bash\n-array([[-60.],\n- [-60.],\n- [-60.]])\n-```\n+- Complex number related ufunc (for example: `conj`)\n+- Hyperbolic/inverse-hyperbolic functions (for example: sinh, arcsinh, cosh, ...)\n+- Bitwise operators\n+- Xor operator\n+- Infinite/Nan-checking (for example: isreal, iscomplex, isfinite, isinf, isnan)\n+- Other ufuncs: copysign, nextafter, modf, frexp, trunc.\n-### Design Decisions:\n+### Design Decisions of matrix class (Developer documentation)\n1. 
Until eval() method is invoked, we create an AST (not exposed to\nthe user) that consist of unevaluated operations and data\n@@ -242,6 +310,10 @@ beta = ml.execute(script).get('B_out').toNumPy()\n## mllearn API\n+mllearn API is designed to be compatible with scikit-learn and MLLib.\n+The classes that are part of mllearn API are LogisticRegression, LinearRegression, SVM, NaiveBayes\n+and [Caffe2DML](http://apache.github.io/incubator-systemml/beginners-guide-caffe2dml).\n+\nThe below code describes how to use mllearn API for training:\n<div class=\"codetabs\">\n@@ -412,8 +484,6 @@ Output:\n```\n-\n-\n## Troubleshooting Python APIs\n#### Unable to load SystemML.jar into current pyspark session.\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala",
"new_path": "src/main/scala/org/apache/sysml/api/ml/BaseSystemMLClassifier.scala",
"diff": "@@ -37,6 +37,41 @@ import org.apache.sysml.api.mlcontext.MLContext.ExplainLevel\nimport java.util.HashMap\nimport scala.collection.JavaConversions._\n+\n+/****************************************************\n+DESIGN DOCUMENT for MLLEARN API:\n+The mllearn API supports LogisticRegression, LinearRegression, SVM, NaiveBayes\n+and Caffe2DML. Every algorithm in this API has a python wrapper (implemented in the mllearn python package)\n+and a Scala class where the actual logic is implementation.\n+Both wrapper and scala class follow the below hierarchy to reuse code and simplify the implementation.\n+\n+\n+ BaseSystemMLEstimator\n+ |\n+ --------------------------------------------\n+ | |\n+BaseSystemMLClassifier BaseSystemMLRegressor\n+ ^ ^\n+ | |\n+SVM, Caffe2DML, ... LinearRegression\n+\n+\n+To conform with MLLib API, for every algorithm, we support two classes for every algorithm:\n+1. Estimator for training: For example: SVM extends Estimator[SVMModel].\n+2. Model for prediction: For example: SVMModel extends Model[SVMModel]\n+\n+Both BaseSystemMLRegressor and BaseSystemMLClassifier implements following methods for training:\n+1. For compatibility with scikit-learn: baseFit(X_mb: MatrixBlock, y_mb: MatrixBlock, sc: SparkContext): MLResults\n+2. For compatibility with MLLib: baseFit(df: ScriptsUtils.SparkDataType, sc: SparkContext): MLResults\n+\n+In the above methods, we execute the DML script for the given algorithm using MLContext.\n+The missing piece of the puzzle is how does BaseSystemMLRegressor and BaseSystemMLClassifier interfaces\n+get the DML script. To enable this, each wrapper class has to implement following methods:\n+1. getTrainingScript(isSingleNode:Boolean):(Script object of mlcontext, variable name of X in the script:String, variable name of y in the script:String)\n+2. getPredictionScript(isSingleNode:Boolean): (Script object of mlcontext, variable name of X in the script:String)\n+\n+****************************************************/\n+\ntrait HasLaplace extends Params {\nfinal val laplace: Param[Double] = new Param[Double](this, \"laplace\", \"Laplace smoothing specified by the user to avoid creation of 0 probabilities.\")\nsetDefault(laplace, 1.0)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Updated the documentation for mllearn and python dsl
- Added design document for mllearn in BaseSystemMLClassifier class
- Add Python DSL documentation
- Updated Native backend documentation wrt MKL DNN
- Caffe2DML documentation: minor updates |
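To make the documented hierarchy concrete, a small sketch of the scikit-learn-style entry points that BaseSystemMLClassifier ultimately backs. It assumes an existing SparkSession `spark`; the toy arrays are illustrative:

```python
import numpy as np
from systemml.mllearn import LogisticRegression

X = np.array([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0], [4.0, 3.0]])
y = np.array([1.0, 2.0, 1.0, 2.0])

lr = LogisticRegression(spark)
lr.fit(X, y)        # routed to baseFit(X_mb, y_mb, sc) in the Scala base class
print(lr.predict(X))
```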
49,736 | 26.05.2017 12:46:38 | 25,200 | b4efc3dfbdfbfc65d3150924b2e3533e557879c9 | [MINOR] Fixed the format in the native-backend documentation | [
{
"change_type": "MODIFY",
"old_path": "docs/native-backend.md",
"new_path": "docs/native-backend.md",
"diff": "@@ -74,7 +74,7 @@ sudo make install\n# After installation, you may also want to add `/opt/OpenBLAS/lib` to your LD_LIBRARY_PATH or `java.library.path`.\n```\n-We also depend on GNU OpenMP (gomp) which will be installed by GCC.\n+When using OpenBLAS, we also depend on GNU OpenMP (gomp) which will be installed by GCC.\nTo find the location of `gomp` on your system, please use the command `ldconfig -p | grep libgomp`.\nIf gomp is available as `/lib64/libgomp.so.1` instead of `/lib64/libgomp.so`,\nplease add a softlink to it:\n@@ -149,7 +149,8 @@ is considerably slower than than the corresponding single-precision MKL DNN pri\nas of MKL 2017 Update 1. We anticipate that this performance bug will be fixed in the future MKL versions.\nUntil then or until SystemML supports single-precision matrices, we recommend that you use OpenBLAS when using script with `conv2d`.\n-Here are the runtime performance in seconds of `conv2d` on 64 images of size 256 X 256 with sparsity 0.9\n+Here are the end-to-end runtime performance in seconds of 10 `conv2d` operations\n+on randomly generated 64 images of size 256 X 256 with sparsity 0.9\nand 32 filter of size 5x5 with stride = [1,1] and pad=[1,1].\n@@ -160,6 +161,9 @@ and 32 filter of size 5x5 with stride = [1,1] and pad=[1,1].\n| Single-precision, channels=32 | 10.765 | 21.963 |\n| Double-precision, channels=32 | 71.118 | 34.881 |\n+Setup used in the above experiment:\n+1. Intel MKL 2017 Update 1, OpenBLAS compiled with GNU OpenMP from source using `g++`.\n+2. CPU: `Intel(R) Xeon(R) CPU E5-2620 v3 @ 2.40GHz`\n# Developer Guide\n@@ -179,21 +183,26 @@ Also, the C, C++ compilers and their flags are picked up by cmake when set in st\nFor this project, I typically make a directory in the `cpp` folder (this folder) and name it the config I use. For instance, `INTEL` for Intel MKL and `OPENBLAS` for OpenBLAS.\n-1. Install `g++`, OpenBLAS and MKL using the above instructions\n+- Install `g++`, OpenBLAS and MKL using the above instructions\n-2. Set `JAVA_HOME` to JDK.\n+- Set `JAVA_HOME` to JDK.\n+```bash\nexport JAVA_HOME=<path to JDK 1.8>\n+```\n-3. Install cmake\n+- Install cmake\n+```bash\n# Centos/RedHat\nsudo yum install cmake3\n# Ubuntu\nsudo apt-get install cmake\n+```\n-4. Compile the libs using the below script.\n+- Compile the libs using the below script.\n+```bash\nmkdir INTEL && cd INTEL\ncmake -DUSE_INTEL_MKL=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ -DCMAKE_CXX_FLAGS=\"-DUSE_GNU_THREADING -m64\" ..\nmake install\n@@ -204,7 +213,7 @@ For this project, I typically make a directory in the `cpp` folder (this folder)\ncd ..\n# The below script helps maintain this document as well as avoid accidental inclusion of non-standard dependencies.\n./check-dependency-linux-x86_64.sh\n-\n+```\nThe generated library files are placed in src/main/cpp/lib. This location can be changed from the CMakeLists.txt file.\n"
}
] | Java | Apache License 2.0 | apache/systemds | [MINOR] Fixed the format in the native-backend documentation |
49,738 | 28.05.2017 13:55:11 | 25,200 | b4dd2c1bf66a642cc622c57bb690e4f599de723c | Codegen multi-aggregates over compressed matrices
This patch extends the codegen support for compressed matrices to
multi-aggregate operations including tests.
Furthermore, this also includes a fix of javadocs regarding recently
removed classes. | [
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofMultiAggregate.java",
"new_path": "src/main/java/org/apache/sysml/runtime/codegen/SpoofMultiAggregate.java",
"diff": "@@ -22,6 +22,7 @@ package org.apache.sysml.runtime.codegen;\nimport java.io.Serializable;\nimport java.util.ArrayList;\n+import java.util.Iterator;\nimport java.util.List;\nimport java.util.concurrent.Callable;\nimport java.util.concurrent.ExecutorService;\n@@ -30,6 +31,7 @@ import java.util.concurrent.Future;\nimport org.apache.sysml.runtime.DMLRuntimeException;\nimport org.apache.sysml.runtime.codegen.SpoofCellwise.AggOp;\n+import org.apache.sysml.runtime.compress.CompressedMatrixBlock;\nimport org.apache.sysml.runtime.functionobjects.Builtin;\nimport org.apache.sysml.runtime.functionobjects.Builtin.BuiltinCode;\nimport org.apache.sysml.runtime.functionobjects.KahanFunction;\n@@ -38,6 +40,7 @@ import org.apache.sysml.runtime.functionobjects.KahanPlusSq;\nimport org.apache.sysml.runtime.functionobjects.ValueFunction;\nimport org.apache.sysml.runtime.instructions.cp.KahanObject;\nimport org.apache.sysml.runtime.instructions.cp.ScalarObject;\n+import org.apache.sysml.runtime.matrix.data.IJV;\nimport org.apache.sysml.runtime.matrix.data.MatrixBlock;\nimport org.apache.sysml.runtime.matrix.data.SparseBlock;\nimport org.apache.sysml.runtime.util.UtilFunctions;\n@@ -95,7 +98,9 @@ public abstract class SpoofMultiAggregate extends SpoofOperator implements Seria\nif( k <= 1 ) //SINGLE-THREADED\n{\n- if( !inputs.get(0).isInSparseFormat() )\n+ if( inputs.get(0) instanceof CompressedMatrixBlock )\n+ executeCompressed((CompressedMatrixBlock)inputs.get(0), b, scalars, c, m, n, 0, m);\n+ else if( !inputs.get(0).isInSparseFormat() )\nexecuteDense(inputs.get(0).getDenseBlock(), b, scalars, c, m, n, 0, m);\nelse\nexecuteSparse(inputs.get(0).getSparseBlock(), b, scalars, c, m, n, 0, m);\n@@ -151,6 +156,15 @@ public abstract class SpoofMultiAggregate extends SpoofOperator implements Seria\n}\n}\n+ private void executeCompressed(CompressedMatrixBlock a, SideInput[] b, double[] scalars, double[] c, int m, int n, int rl, int ru) throws DMLRuntimeException\n+ {\n+ //core compressed aggregation operation\n+ Iterator<IJV> iter = a.getIterator(rl, ru, true);\n+ while( iter.hasNext() ) {\n+ IJV cell = iter.next();\n+ genexec(cell.getV(), b, scalars, c, m, n, cell.getI(), cell.getJ());\n+ }\n+ }\nprotected abstract void genexec( double a, SideInput[] b, double[] scalars, double[] c, int m, int n, int rowIndex, int colIndex);\n@@ -251,7 +265,9 @@ public abstract class SpoofMultiAggregate extends SpoofOperator implements Seria\npublic double[] call() throws DMLRuntimeException {\ndouble[] c = new double[_aggOps.length];\nsetInitialOutputValues(c);\n- if( !_a.isInSparseFormat() )\n+ if( _a instanceof CompressedMatrixBlock )\n+ executeCompressed((CompressedMatrixBlock)_a, _b, _scalars, c, _rlen, _clen, _rl, _ru);\n+ else if( !_a.isInSparseFormat() )\nexecuteDense(_a.getDenseBlock(), _b, _scalars, c, _rlen, _clen, _rl, _ru);\nelse\nexecuteSparse(_a.getSparseBlock(), _b, _scalars, c, _rlen, _clen, _rl, _ru);\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main/java/org/apache/sysml/runtime/compress/BitmapEncoder.java",
"new_path": "src/main/java/org/apache/sysml/runtime/compress/BitmapEncoder.java",
"diff": "@@ -90,9 +90,6 @@ public class BitmapEncoder\n/**\n* Encodes the bitmap as a series of run lengths and offsets.\n- * <p>\n- * <b>NOTE: This method must be kept in sync with {@link BitmapDecoderRLE}\n- * !</b>\n*\n* @param offsets uncompressed offset list\n* @param len logical length of the given offset list\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/java/org/apache/sysml/test/integration/functions/codegen/CompressedMultiAggregateTest.java",
"diff": "+/*\n+ * Licensed to the Apache Software Foundation (ASF) under one\n+ * or more contributor license agreements. See the NOTICE file\n+ * distributed with this work for additional information\n+ * regarding copyright ownership. The ASF licenses this file\n+ * to you under the Apache License, Version 2.0 (the\n+ * \"License\"); you may not use this file except in compliance\n+ * with the License. You may obtain a copy of the License at\n+ *\n+ * http://www.apache.org/licenses/LICENSE-2.0\n+ *\n+ * Unless required by applicable law or agreed to in writing,\n+ * software distributed under the License is distributed on an\n+ * \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+ * KIND, either express or implied. See the License for the\n+ * specific language governing permissions and limitations\n+ * under the License.\n+ */\n+\n+package org.apache.sysml.test.integration.functions.codegen;\n+\n+import java.io.File;\n+import java.util.HashMap;\n+\n+import org.junit.Assert;\n+import org.junit.Test;\n+import org.apache.sysml.api.DMLScript;\n+import org.apache.sysml.api.DMLScript.RUNTIME_PLATFORM;\n+import org.apache.sysml.hops.OptimizerUtils;\n+import org.apache.sysml.lops.LopProperties.ExecType;\n+import org.apache.sysml.runtime.compress.CompressedMatrixBlock;\n+import org.apache.sysml.runtime.matrix.data.MatrixValue.CellIndex;\n+import org.apache.sysml.test.integration.AutomatedTestBase;\n+import org.apache.sysml.test.integration.TestConfiguration;\n+import org.apache.sysml.test.utils.TestUtils;\n+\n+public class CompressedMultiAggregateTest extends AutomatedTestBase\n+{\n+ private static final String TEST_NAME1 = \"CompressedMultiAggregateMain\";\n+ private static final String TEST_NAME2 = \"CompressedMultiAggregateSide\";\n+ private static final String TEST_DIR = \"functions/codegen/\";\n+ private static final String TEST_CLASS_DIR = TEST_DIR + CompressedMultiAggregateTest.class.getSimpleName() + \"/\";\n+ private final static String TEST_CONF = \"SystemML-config-codegen-compress.xml\";\n+ private final static File TEST_CONF_FILE = new File(SCRIPT_DIR + TEST_DIR, TEST_CONF);\n+\n+ private static final int rows = 2023;\n+ private static final int cols = 20;\n+ private static final double sparsity1 = 0.9;\n+ private static final double sparsity2 = 0.1;\n+ private static final double sparsity3 = 0.0;\n+ private static final double eps = Math.pow(10, -6);\n+\n+ public enum SparsityType {\n+ DENSE,\n+ SPARSE,\n+ EMPTY,\n+ }\n+\n+ public enum ValueType {\n+ RAND, //UC\n+ CONST, //RLE\n+ RAND_ROUND_OLE, //OLE\n+ RAND_ROUND_DDC, //RLE\n+ }\n+\n+ @Override\n+ public void setUp() {\n+ TestUtils.clearAssertionInformation();\n+ addTestConfiguration( TEST_NAME1, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME1, new String[] { \"R\" }) );\n+ addTestConfiguration( TEST_NAME2, new TestConfiguration(TEST_CLASS_DIR, TEST_NAME2, new String[] { \"R\" }) );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainDenseConstCP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.DENSE, ValueType.CONST, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainDenseRandCP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.DENSE, ValueType.RAND, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainDenseRand2CP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.DENSE, ValueType.RAND_ROUND_DDC, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainDenseRand3CP() {\n+ 
testCompressedMultiAggregate( TEST_NAME1, SparsityType.DENSE, ValueType.RAND_ROUND_OLE, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainSparseConstCP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.SPARSE, ValueType.CONST, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainSparseRandCP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.SPARSE, ValueType.RAND, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainSparseRand2CP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.SPARSE, ValueType.RAND_ROUND_DDC, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainSparseRand3CP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.SPARSE, ValueType.RAND_ROUND_OLE, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainEmptyConstCP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.EMPTY, ValueType.CONST, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainEmptyRandCP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.EMPTY, ValueType.RAND, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainEmptyRand2CP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.EMPTY, ValueType.RAND_ROUND_DDC, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainEmptyRand3CP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.EMPTY, ValueType.RAND_ROUND_OLE, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateSideDenseConstCP() {\n+ testCompressedMultiAggregate( TEST_NAME2, SparsityType.DENSE, ValueType.CONST, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateSideDenseRandCP() {\n+ testCompressedMultiAggregate( TEST_NAME2, SparsityType.DENSE, ValueType.RAND, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateSideDenseRand2CP() {\n+ testCompressedMultiAggregate( TEST_NAME2, SparsityType.DENSE, ValueType.RAND_ROUND_DDC, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateSideDenseRand3CP() {\n+ testCompressedMultiAggregate( TEST_NAME2, SparsityType.DENSE, ValueType.RAND_ROUND_OLE, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateSideSparseConstCP() {\n+ testCompressedMultiAggregate( TEST_NAME2, SparsityType.SPARSE, ValueType.CONST, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateSideSparseRandCP() {\n+ testCompressedMultiAggregate( TEST_NAME2, SparsityType.SPARSE, ValueType.RAND, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateSideSparseRand2CP() {\n+ testCompressedMultiAggregate( TEST_NAME2, SparsityType.SPARSE, ValueType.RAND_ROUND_DDC, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateSideSparseRand3CP() {\n+ testCompressedMultiAggregate( TEST_NAME2, SparsityType.SPARSE, ValueType.RAND_ROUND_OLE, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateSideEmptyConstCP() {\n+ testCompressedMultiAggregate( TEST_NAME2, SparsityType.EMPTY, ValueType.CONST, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateSideEmptyRandCP() {\n+ testCompressedMultiAggregate( TEST_NAME2, SparsityType.EMPTY, ValueType.RAND, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateSideEmptyRand2CP() {\n+ testCompressedMultiAggregate( TEST_NAME2, SparsityType.EMPTY, 
ValueType.RAND_ROUND_DDC, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateSideEmptyRand3CP() {\n+ testCompressedMultiAggregate( TEST_NAME2, SparsityType.EMPTY, ValueType.RAND_ROUND_OLE, ExecType.CP );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainDenseConstSP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.DENSE, ValueType.CONST, ExecType.SPARK );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainDenseRandSP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.DENSE, ValueType.RAND, ExecType.SPARK );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainDenseRand2SP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.DENSE, ValueType.RAND_ROUND_DDC, ExecType.SPARK );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainDenseRand3SP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.DENSE, ValueType.RAND_ROUND_OLE, ExecType.SPARK );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainSparseConstSP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.SPARSE, ValueType.CONST, ExecType.SPARK );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainSparseRandSP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.SPARSE, ValueType.RAND, ExecType.SPARK );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainSparseRand2SP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.SPARSE, ValueType.RAND_ROUND_DDC, ExecType.SPARK );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainSparseRand3SP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.SPARSE, ValueType.RAND_ROUND_OLE, ExecType.SPARK );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainEmptyConstSP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.EMPTY, ValueType.CONST, ExecType.SPARK );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainEmptyRandSP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.EMPTY, ValueType.RAND, ExecType.SPARK );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainEmptyRand2SP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.EMPTY, ValueType.RAND_ROUND_DDC, ExecType.SPARK );\n+ }\n+\n+ @Test\n+ public void testCompressedMultiAggregateMainEmptyRand3SP() {\n+ testCompressedMultiAggregate( TEST_NAME1, SparsityType.EMPTY, ValueType.RAND_ROUND_OLE, ExecType.SPARK );\n+ }\n+\n+ //TODO compressed side inputs in spark\n+\n+\n+ private void testCompressedMultiAggregate(String testname, SparsityType stype, ValueType vtype, ExecType et)\n+ {\n+ boolean oldRewrites = OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION;\n+ RUNTIME_PLATFORM platformOld = rtplatform;\n+ switch( et ){\n+ case MR: rtplatform = RUNTIME_PLATFORM.HADOOP; break;\n+ case SPARK: rtplatform = RUNTIME_PLATFORM.SPARK; break;\n+ default: rtplatform = RUNTIME_PLATFORM.HYBRID_SPARK; break;\n+ }\n+\n+ boolean sparkConfigOld = DMLScript.USE_LOCAL_SPARK_CONFIG;\n+ if( rtplatform == RUNTIME_PLATFORM.SPARK || rtplatform == RUNTIME_PLATFORM.HYBRID_SPARK )\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = true;\n+\n+ try\n+ {\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = true;\n+ TestConfiguration config = getTestConfiguration(testname);\n+ loadTestConfiguration(config);\n+\n+ String HOME = SCRIPT_DIR + TEST_DIR;\n+ fullDMLScriptName = HOME + testname + \".dml\";\n+ programArgs = new String[]{\"-explain\", \"-stats\",\n+ \"-args\", input(\"X\"), output(\"R\") };\n+\n+ fullRScriptName = HOME + testname + 
\".R\";\n+ rCmd = getRCmd(inputDir(), expectedDir());\n+\n+ //generate input data\n+ double sparsity = -1;\n+ switch( stype ){\n+ case DENSE: sparsity = sparsity1; break;\n+ case SPARSE: sparsity = sparsity2; break;\n+ case EMPTY: sparsity = sparsity3; break;\n+ }\n+\n+ //generate input data\n+ double min = (vtype==ValueType.CONST)? 10 : -10;\n+ double[][] X = TestUtils.generateTestMatrix(rows, cols, min, 10, sparsity, 7);\n+ if( vtype==ValueType.RAND_ROUND_OLE || vtype==ValueType.RAND_ROUND_DDC ) {\n+ CompressedMatrixBlock.ALLOW_DDC_ENCODING = (vtype==ValueType.RAND_ROUND_DDC);\n+ X = TestUtils.round(X);\n+ }\n+ writeInputMatrixWithMTD(\"X\", X, true);\n+\n+ //run tests\n+ runTest(true, false, null, -1);\n+ runRScript(true);\n+\n+ //compare matrices\n+ HashMap<CellIndex, Double> dmlfile = readDMLMatrixFromHDFS(\"R\");\n+ HashMap<CellIndex, Double> rfile = readRMatrixFromFS(\"R\");\n+ TestUtils.compareMatrices(dmlfile, rfile, eps, \"Stat-DML\", \"Stat-R\");\n+ Assert.assertTrue(heavyHittersContainsSubString(\"spoofMA\")\n+ || heavyHittersContainsSubString(\"sp_spoofMA\"));\n+ }\n+ finally {\n+ rtplatform = platformOld;\n+ DMLScript.USE_LOCAL_SPARK_CONFIG = sparkConfigOld;\n+ OptimizerUtils.ALLOW_ALGEBRAIC_SIMPLIFICATION = oldRewrites;\n+ OptimizerUtils.ALLOW_AUTO_VECTORIZATION = true;\n+ OptimizerUtils.ALLOW_OPERATOR_FUSION = true;\n+ }\n+ }\n+\n+ /**\n+ * Override default configuration with custom test configuration to ensure\n+ * scratch space and local temporary directory locations are also updated.\n+ */\n+ @Override\n+ protected File getConfigTemplateFile() {\n+ // Instrumentation in this test's output log to show custom configuration file used for template.\n+ System.out.println(\"This test case overrides default configuration with \" + TEST_CONF_FILE.getPath());\n+ return TEST_CONF_FILE;\n+ }\n+}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/CompressedMultiAggregateMain.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args <- commandArgs(TRUE)\n+library(\"Matrix\")\n+library(\"matrixStats\")\n+\n+X = readMM(paste(args[1], \"X.mtx\", sep=\"\"));\n+\n+# two fused with and without aggregation\n+R = as.matrix(sum(X/3 * X/4 * X/5) - sum(X * X/2))\n+\n+writeMM(as(R,\"CsparseMatrix\"), paste(args[2], \"R\", sep=\"\"));\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/CompressedMultiAggregateMain.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read($1)\n+\n+# two fused with and without aggregation\n+R = as.matrix(sum(X/3 * X/4 * X/5) - sum(X * X/2))\n+\n+write(R, $2)\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/CompressedMultiAggregateSide.R",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+args <- commandArgs(TRUE)\n+library(\"Matrix\")\n+library(\"matrixStats\")\n+\n+X = readMM(paste(args[1], \"X.mtx\", sep=\"\"));\n+M = matrix(0, nrow(X), ncol(X));\n+M[7,7] = 7;\n+\n+# two fused with and without aggregation\n+R = as.matrix(sum(M/3 * M/4 * X) - sum(M * M/2 * X))\n+\n+writeMM(as(R,\"CsparseMatrix\"), paste(args[2], \"R\", sep=\"\"));\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "src/test/scripts/functions/codegen/CompressedMultiAggregateSide.dml",
"diff": "+#-------------------------------------------------------------\n+#\n+# Licensed to the Apache Software Foundation (ASF) under one\n+# or more contributor license agreements. See the NOTICE file\n+# distributed with this work for additional information\n+# regarding copyright ownership. The ASF licenses this file\n+# to you under the Apache License, Version 2.0 (the\n+# \"License\"); you may not use this file except in compliance\n+# with the License. You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing,\n+# software distributed under the License is distributed on an\n+# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n+# KIND, either express or implied. See the License for the\n+# specific language governing permissions and limitations\n+# under the License.\n+#\n+#-------------------------------------------------------------\n+\n+X = read($1)\n+M = matrix(0, nrow(X), ncol(X));\n+M[7,7] = 7;\n+\n+# two fused with and without aggregation\n+R = as.matrix(sum(M/3 * M/4 * X) - sum(M * M/2 * X))\n+\n+write(R, $2)\n"
}
] | Java | Apache License 2.0 | apache/systemds | [SYSTEMML-1289] Codegen multi-aggregates over compressed matrices
This patch extends the codegen support for compressed matrices to
multi-aggregate operations including tests.
Furthermore, it fixes javadocs that referenced recently
removed classes. |
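
Note on the pattern exercised above: a multi-aggregate is a set of full aggregates over a shared input that the code generator fuses into a single operator, reported as spoofMA (or sp_spoofMA on Spark) in the runtime statistics the test asserts on, so the shared input is read only once. The following minimal DML sketch mirrors CompressedMultiAggregateMain.dml with the two fusion candidates written out explicitly; whether fusion actually triggers depends on the optimizer flags the test harness enables, so the fused execution is an assumption here, not guaranteed semantics.

    X = read($1)                   # input matrix; may be compressed at runtime
    # two full aggregates over the same input X; codegen can fuse them into
    # one multi-aggregate operator that computes both sums in a single pass
    agg1 = sum(X/3 * X/4 * X/5)    # first full aggregate
    agg2 = sum(X * X/2)            # second full aggregate
    R = as.matrix(agg1 - agg2)     # combine the scalars into a 1x1 result
    write(R, $2)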