colGroups, MatrixBlock that, Matr
LMMParallel(noPreAggGroups, preAggGroups, that, ret, null, overlapping, k);
}
- ret.recomputeNonZeros();
+ ret.recomputeNonZeros(k);
ret.examSparsity();
return ret;
}
diff --git a/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibMMChain.java b/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibMMChain.java
index bc164a5e91b..060c7368717 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibMMChain.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibMMChain.java
@@ -35,6 +35,21 @@
import org.apache.sysds.runtime.matrix.data.MatrixBlock;
import org.apache.sysds.runtime.matrix.operators.BinaryOperator;
+/**
+ * Support compressed MM chain operation to fuse the following cases :
+ *
+ *
+ * XtXv == (t(X) %*% (X %*% v))
+ *
+ *
+ *
+ * XtwXv == (t(X) %*% (w * (X %*% v)))
+ *
+ *
+ *
+ * XtXvy == (t(X) %*% ((X %*% v) - y))
+ *
+ */
public final class CLALibMMChain {
static final Log LOG = LogFactory.getLog(CLALibMMChain.class.getName());
@@ -42,6 +57,33 @@ private CLALibMMChain() {
// private constructor
}
+ /**
+ * Support compressed MM chain operation to fuse the following cases :
+ *
+ *
+ * XtXv == (t(X) %*% (X %*% v))
+ *
+ *
+ *
+ * XtwXv == (t(X) %*% (w * (X %*% v)))
+ *
+ *
+ *
+ * XtXvy == (t(X) %*% ((X %*% v) - y))
+ *
+ *
+ * Note that the point of this optimization is that v and w are always vectors. This means in practice all the
+ * compute is faster if the intermediates are exploited.
+ *
+ *
+ * @param x The X part of the chain optimized kernel
+ * @param v The mandatory v part of the chain
+ * @param w The optional w part of the chain
+ * @param out The output to put the result into. Can also be returned and in some cases will not be used.
+ * @param ctype either XtwXv, XtXv or XtXvy
+ * @param k the parallelization degree
+ * @return The result either in the given output or a new allocation
+ */
public static MatrixBlock mmChain(CompressedMatrixBlock x, MatrixBlock v, MatrixBlock w, MatrixBlock out,
ChainType ctype, int k) {
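For intuition on why the fusion pays off, a minimal sketch of the XtXv case on plain double arrays (not the compressed kernel itself): keeping the n x 1 intermediate X %*% v avoids ever materializing t(X) or t(X) %*% X.

	// Sketch only: XtXv == t(X) %*% (X %*% v) with a single n-length intermediate.
	public final class MMChainSketch {
		public static double[] xtxv(double[][] X, double[] v) {
			final int n = X.length, m = X[0].length;
			final double[] tmp = new double[n]; // intermediate X %*% v
			for(int i = 0; i < n; i++)
				for(int j = 0; j < m; j++)
					tmp[i] += X[i][j] * v[j];
			final double[] out = new double[m]; // t(X) %*% tmp, without forming t(X)
			for(int i = 0; i < n; i++)
				for(int j = 0; j < m; j++)
					out[j] += X[i][j] * tmp[i];
			return out;
		}
	}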
diff --git a/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibMatrixMult.java b/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibMatrixMult.java
index 185b684f652..5b392e8b7e0 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibMatrixMult.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibMatrixMult.java
@@ -63,11 +63,11 @@ public static MatrixBlock matrixMultiply(MatrixBlock m1, MatrixBlock m2, MatrixB
}
if(!(m1 instanceof CompressedMatrixBlock) && transposeLeft) {
- m1 = LibMatrixReorg.transpose(m1, k);
+ m1 = LibMatrixReorg.transpose(m1, k, true);
transposeLeft = false;
}
else if(!(m2 instanceof CompressedMatrixBlock) && transposeRight) {
- m2 = LibMatrixReorg.transpose(m2, k);
+ m2 = LibMatrixReorg.transpose(m2, k, true);
transposeRight = false;
}
}
@@ -87,7 +87,7 @@ else if(!(m2 instanceof CompressedMatrixBlock) && transposeRight) {
LOG.warn("Transposing decompression");
ret = ((CompressedMatrixBlock) ret).decompress(k);
}
- ret = LibMatrixReorg.transpose(ret, k);
+ ret = LibMatrixReorg.transpose(ret, k, true);
}
return ret;
diff --git a/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibRightMultBy.java b/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibRightMultBy.java
index 39468b0cab8..2eef5f9f3f8 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibRightMultBy.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibRightMultBy.java
@@ -243,7 +243,9 @@ private static boolean RMMParallel(List filteredGroups, MatrixBlock t
catch(InterruptedException | ExecutionException e) {
throw new DMLRuntimeException(e);
}
- pool.shutdown();
+ finally{
+ pool.shutdown();
+ }
return containsNull;
}
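The change above moves pool.shutdown() into a finally block so the thread pool is released even when a task throws. A minimal sketch of the pattern with a hypothetical task list (not the actual RMMParallel code):

	import java.util.List;
	import java.util.concurrent.Callable;
	import java.util.concurrent.ExecutorService;
	import java.util.concurrent.Executors;
	import java.util.concurrent.Future;

	public final class PoolShutdownSketch {
		public static void runAll(List<Callable<Object>> tasks, int k) throws Exception {
			final ExecutorService pool = Executors.newFixedThreadPool(k);
			try {
				for(Future<Object> f : pool.invokeAll(tasks))
					f.get(); // surfaces exceptions thrown inside the tasks
			}
			finally {
				pool.shutdown(); // always release the pool, even on failure
			}
		}
	}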
diff --git a/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibScalar.java b/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibScalar.java
index 0da3f2d9690..3dea7f577a9 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibScalar.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibScalar.java
@@ -58,7 +58,7 @@ private CLALibScalar() {
public static MatrixBlock scalarOperations(ScalarOperator sop, CompressedMatrixBlock m1, MatrixValue result) {
if(isInvalidForCompressedOutput(m1, sop)) {
- LOG.warn("scalar overlapping not supported for op: " + sop.fn);
+ LOG.warn("scalar overlapping not supported for op: " + sop.fn.getClass().getSimpleName());
MatrixBlock m1d = m1.decompress(sop.getNumThreads());
return m1d.scalarOperations(sop, result);
}
diff --git a/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibSeparator.java b/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibSeparator.java
index 42415bcce45..d78b053ad72 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibSeparator.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibSeparator.java
@@ -44,8 +44,8 @@ public interface CLALibSeparator {
* @return A split of the groups and their dictionaries.
*/
public static SeparatedGroups split(List<AColGroup> gs) {
- List<IDictionary> dicts = new ArrayList<>();
- List<AColGroup> indexStructures = new ArrayList<>();
+ List<IDictionary> dicts = new ArrayList<>(gs.size());
+ List<AColGroup> indexStructures = new ArrayList<>(gs.size());
for(AColGroup g : gs) {
if(g instanceof ADictBasedColGroup) {
ADictBasedColGroup dg = (ADictBasedColGroup) g;
@@ -68,24 +68,25 @@ public static SeparatedGroups split(List gs) {
* @param gs groups to combine with dictionaries
* @param d dictionaries to combine back into the groups.
* @param blen The block size.
- * @return A combined list of columngroups.
+ * @return A combined list of column groups.
*/
public static List<AColGroup> combine(List<AColGroup> gs, Map<Integer, List<IDictionary>> d, int blen) {
int gid = 0;
+ int s = 0;
+ for(List<IDictionary> e : d.values())
+ s += e.size();
+
+ if(gs.size() != s)
+ throw new RuntimeException(
+ "Invalid combine of of groups and dictionaries groups:" + gs.size() + " vs dicts" + s);
for(int i = 0; i < d.size(); i++) {
List<IDictionary> dd = d.get(i);
for(int j = 0; j < dd.size(); j++) {
IDictionary ddd = dd.get(j);
- if(!(ddd instanceof PlaceHolderDict)) {
-
- AColGroup g = gs.get(gid);
- while(!(g instanceof ADictBasedColGroup)) {
- gid++;
- g = gs.get(gid);
- }
+ AColGroup g = gs.get(gid);
+ if(g instanceof ADictBasedColGroup) {
ADictBasedColGroup dg = (ADictBasedColGroup) g;
-
gs.set(gid, dg.copyAndSet(ddd));
}
diff --git a/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibSlice.java b/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibSlice.java
index 5ba36923b9a..06198c3f2e0 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibSlice.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibSlice.java
@@ -32,8 +32,6 @@
import org.apache.sysds.runtime.compress.colgroup.AColGroup;
import org.apache.sysds.runtime.compress.colgroup.ColGroupConst;
import org.apache.sysds.runtime.compress.colgroup.ColGroupEmpty;
-import org.apache.sysds.runtime.compress.colgroup.indexes.ColIndexFactory;
-import org.apache.sysds.runtime.compress.colgroup.indexes.IColIndex;
import org.apache.sysds.runtime.data.DenseBlock;
import org.apache.sysds.runtime.matrix.data.MatrixBlock;
import org.apache.sysds.runtime.util.CommonThreadPool;
@@ -146,7 +144,6 @@ private static MatrixBlock sliceRowsDecompress(CompressedMatrixBlock cmb, int rl
public static MatrixBlock sliceRowsCompressed(CompressedMatrixBlock cmb, int rl, int ru) {
final List<AColGroup> groups = cmb.getColGroups();
final List<AColGroup> newColGroups = new ArrayList<>(groups.size());
- final List<IColIndex> emptyGroups = new ArrayList<>();
final int rue = ru + 1;
final CompressedMatrixBlock ret = new CompressedMatrixBlock(rue - rl, cmb.getNumColumns());
@@ -156,17 +153,12 @@ public static MatrixBlock sliceRowsCompressed(CompressedMatrixBlock cmb, int rl,
if(slice != null)
newColGroups.add(slice);
else
- emptyGroups.add(grp.getColIndices());
+ newColGroups.add(new ColGroupEmpty(grp.getColIndices()));
}
if(newColGroups.size() == 0)
return new MatrixBlock(rue - rl, cmb.getNumColumns(), 0.0);
- if(!emptyGroups.isEmpty()) {
- IColIndex empties = ColIndexFactory.combineIndexes(emptyGroups);
- newColGroups.add(new ColGroupEmpty(empties));
- }
-
ret.allocateColGroupList(newColGroups);
ret.setNonZeros(-1);
ret.setOverlapping(cmb.isOverlapping());
diff --git a/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibStack.java b/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibStack.java
index e6756604ace..e88913391f9 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibStack.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/lib/CLALibStack.java
@@ -31,7 +31,6 @@
import org.apache.commons.logging.LogFactory;
import org.apache.sysds.runtime.DMLRuntimeException;
import org.apache.sysds.runtime.compress.CompressedMatrixBlock;
-import org.apache.sysds.runtime.compress.DMLCompressionException;
import org.apache.sysds.runtime.compress.colgroup.AColGroup;
import org.apache.sysds.runtime.compress.colgroup.dictionary.IDictionary;
import org.apache.sysds.runtime.matrix.data.MatrixBlock;
@@ -51,7 +50,7 @@ private CLALibStack() {
* The intention is that the combining is able to resolve differences in the different MatrixBlocks' allocations.
*
* @param m The map of Index to MatrixBlocks
- * @param d A map of the dictionaries contained in the compressionscheme
+ * @param d A map of the dictionaries contained in the compression scheme
* @param k The parallelization degree allowed for this operation
* @return The combined matrix.
*/
@@ -113,8 +112,7 @@ private static MatrixBlock combine(final Map m, Map<
return combineColumnGroups(m, d, lookup, rlen, clen, blen, k);
}
catch(Exception e) {
- // throw new RuntimeException("failed normal combine", e);
- LOG.error("Failed to combine compressed blocks, fallback to decompression.", e);
+ LOG.warn("Failed to combine compressed blocks, fallback to decompression.", e);
return combineViaDecompression(m, rlen, clen, blen, k);
}
}
@@ -144,14 +142,8 @@ private static MatrixBlock combineColumnGroups(final Map<MatrixIndexes, MatrixBlock> m,
final List<AColGroup> gs = cmb.getColGroups();
nGroups += gs.size();
@@ -165,20 +157,18 @@ private static MatrixBlock combineColumnGroups(final Map<MatrixIndexes, MatrixBlock> m,
final List<AColGroup> gs = cmb.getColGroups();
+ if(cgid + gs.size() > nGroups)
+ return combineViaDecompression(m, rlen, clen, blen, k);
for(int i = 0; i < gs.size(); i++) {
AColGroup g = gs.get(i);
final AColGroup gc = bc > 0 ? g.shiftColIndices(bc * blen) : g;
-
finalCols[cgid][br] = gc;
- if(br != 0 && (finalCols[cgid][0] == null ||
- !finalCols[cgid][br].getColIndices().equals(finalCols[cgid][0].getColIndices()))) {
- LOG.warn("Combining via decompression. There was an column with different index");
- return combineViaDecompression(m, rlen, clen, blen, k);
- }
cgid++;
-
}
}
if(cgid != finalCols.length) {
@@ -195,18 +185,20 @@ private static MatrixBlock combineColumnGroups(final Map {
- return combineN(x);
+ AColGroup r = AColGroup.appendN(x, blen, rlen);
+ return r;
}).collect(Collectors.toList());
}).get();
- if(d != null) {
- finalGroups = CLALibSeparator.combine(finalGroups, d, blen);
- }
-
if(finalGroups.contains(null)) {
LOG.warn("Combining via decompression. There was a column group that did not append ");
return combineViaDecompression(m, rlen, clen, blen, k);
}
+
+ if(d != null) {
+ finalGroups = CLALibSeparator.combine(finalGroups, d, blen);
+ }
+
return new CompressedMatrixBlock(rlen, clen, -1, false, finalGroups);
}
catch(InterruptedException | ExecutionException e) {
@@ -216,14 +208,4 @@ private static MatrixBlock combineColumnGroups(final Map get(double key) {
@Override
public DCounts inc(Double key, int c, int id) {
- // return inc((double) key, c, id);
- throw new NotImplementedException();
+ return inc((double) key, c, id);
}
@Override
diff --git a/src/main/java/org/apache/sysds/runtime/compress/utils/ACountHashMap.java b/src/main/java/org/apache/sysds/runtime/compress/utils/ACountHashMap.java
index 5182dfde48d..9e8e87c83b6 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/utils/ACountHashMap.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/utils/ACountHashMap.java
@@ -25,254 +25,254 @@
import org.apache.sysds.runtime.compress.utils.ACount.DCounts;
public abstract class ACountHashMap implements Cloneable {
- protected static final Log LOG = LogFactory.getLog(ACountHashMap.class.getName());
- protected static final int RESIZE_FACTOR = 2;
- protected static final float LOAD_FACTOR = 0.80f;
- protected static final int shortCutSize = 10;
-
- protected int size;
- protected ACount[] data;
-
- public ACountHashMap() {
- data = create(1);
- size = 0;
- }
-
- public ACountHashMap(int arrSize) {
- if(arrSize < shortCutSize)
- data = create(1);
- else {
- arrSize = (int) (arrSize * (1.0 / LOAD_FACTOR));
- arrSize += arrSize % 2 == 0 ? 1 : 0;
- data = create(arrSize);
- }
- size = 0;
- }
-
- public int size() {
- return size;
- }
-
- /**
- * Increment and return the id of the incremeted index.
- *
- * @param key The key to increment
- * @return The id of the incremented entry.
- */
- public final int increment(T key) {
- return increment(key, 1);
- }
-
- public final int increment(double key) {
- return increment(key, 1);
- }
-
- /**
- * Increment and return the id of the incremented index.
- *
- * @param key The key to increment
- * @param count The number of times to increment the value
- * @return The Id of the incremented entry.
- */
- public int increment(final T key, final int count) {
- // skip hash if data array is 1 length
- final int ix = data.length < shortCutSize ? 0 : hash(key) % data.length;
-
- try {
- return increment(key, ix, count);
- }
- catch(ArrayIndexOutOfBoundsException e) {
- if(ix < 0)
- return increment(key, 0, count);
- else
- throw new RuntimeException(e);
- }
- }
-
- private final int increment(final T key, final int ix, final int count) throws ArrayIndexOutOfBoundsException {
- final ACount l = data[ix];
- if(l == null) {
- data[ix] = create(key, size);
- // never try to resize here since we use a new unused bucket.
- return size++;
- }
- else {
- final ACount v = l.inc(key, count, size);
- if(v.id == size) {
- size++;
- resize();
- return size - 1;
- }
- else {
- // do not resize if not new.
- return v.id;
- }
- }
- }
-
- public final int increment(final double key, final int count) {
- // skip hash if data array is 1 length
- final int ix = data.length < shortCutSize ? 0 : DCounts.hashIndex(key) % data.length;
-
- try {
- return increment(key, ix, count);
- }
- catch(ArrayIndexOutOfBoundsException e) {
- if(ix < 0)
- return increment(key, 0, count);
- else
- throw new RuntimeException(e);
- }
- }
-
- private final int increment(final double key, final int ix, final int count) throws ArrayIndexOutOfBoundsException {
- final ACount l = data[ix];
- if(l == null) {
- data[ix] = create(key, size);
- // never try to resize here since we use a new unused bucket.
- return size++;
- }
- else {
- final ACount v = l.inc(key, count, size);
- if(v.id == size) {
- size++;
- resize();
- return size - 1;
- }
- else {
- // do not resize if not new.
- return v.id;
- }
- }
- }
-
- public int get(T key) {
- return getC(key).count;
- }
-
- public int getId(T key) {
- return getC(key).id;
- }
-
- public ACount getC(T key) {
- final int ix = data.length < shortCutSize ? 0 : hash(key) % data.length;
- try {
- ACount l = data[ix];
- return l != null ? l.get(key) : null;
- }
- catch(ArrayIndexOutOfBoundsException e) {
- if(ix < 0) {
- ACount l = data[0];
- return l != null ? l.get(key) : null;
- }
- else
- throw new RuntimeException(e);
- }
- }
-
- public int getOrDefault(T key, int def) {
- ACount e = getC(key);
- return (e == null) ? def : e.count;
- }
-
- public final ACount[] extractValues() {
- final ACount[] ret = create(size);
- int i = 0;
- for(ACount e : data) {
- while(e != null) {
- ret[i++] = e;
- e = e.next();
- }
- }
- return ret;
- }
-
- public T getMostFrequent() {
- T f = null;
- int fq = 0;
- for(ACount e : data) {
- while(e != null) {
- if(e.count > fq) {
- fq = e.count;
- f = e.key();
- }
- e = e.next();
- }
- }
- return f;
- }
-
- private void resize() {
- if(size >= LOAD_FACTOR * data.length && size > shortCutSize)
- // +1 to make the hash buckets better
- resize(Math.max(data.length, shortCutSize) * RESIZE_FACTOR + 1);
- }
-
- private void resize(int underlying_size) {
-
- // resize data array and copy existing contents
- final ACount[] olddata = data;
- data = create(underlying_size);
-
- // rehash all entries
- for(ACount e : olddata)
- appendValue(e);
- }
-
- protected void appendValue(ACount ent) {
- if(ent != null) {
- // take the tail recursively first
- appendValue(ent.next()); // append tail first
- ent.setNext(null); // set this tail to null.
- final int ix = hash(ent.key()) % data.length;
- try {
- appendValue(ent, ix);
- }
- catch(ArrayIndexOutOfBoundsException e) {
- if(ix < 0)
- appendValue(ent, 0);
- else
- throw new RuntimeException(e);
- }
- }
- }
-
- private void appendValue(ACount ent, int ix) {
- ACount l = data[ix];
- data[ix] = ent;
- ent.setNext(l);
- }
-
- public void sortBuckets() {
- if(size > 10)
- for(int i = 0; i < data.length; i++)
- if(data[i] != null)
- data[i] = data[i].sort();
- }
-
- public void reset(int size) {
- this.data = create(size);
- this.size = 0;
- }
-
- protected abstract ACount[] create(int size);
-
- protected abstract int hash(T key);
-
- protected abstract ACount create(T key, int id);
-
- protected ACount create(double key, int id) {
- throw new NotImplementedException();
- }
-
- @Override
- public String toString() {
- StringBuilder sb = new StringBuilder();
- sb.append(this.getClass().getSimpleName());
- for(int i = 0; i < data.length; i++)
- if(data[i] != null)
- sb.append(", " + data[i]);
- return sb.toString();
- }
+ protected static final Log LOG = LogFactory.getLog(ACountHashMap.class.getName());
+ protected static final int RESIZE_FACTOR = 2;
+ protected static final float LOAD_FACTOR = 0.80f;
+ protected static final int shortCutSize = 10;
+
+ protected int size;
+ protected ACount[] data;
+
+ public ACountHashMap() {
+ data = create(1);
+ size = 0;
+ }
+
+ public ACountHashMap(int arrSize) {
+ if(arrSize < shortCutSize)
+ data = create(1);
+ else {
+ arrSize = (int) (arrSize * (1.0 / LOAD_FACTOR));
+ arrSize += arrSize % 2 == 0 ? 1 : 0;
+ data = create(arrSize);
+ }
+ size = 0;
+ }
+
+ public int size() {
+ return size;
+ }
+
+ /**
+ * Increment and return the id of the incremented index.
+ *
+ * @param key The key to increment
+ * @return The id of the incremented entry.
+ */
+ public final int increment(T key) {
+ return increment(key, 1);
+ }
+
+ public final int increment(double key) {
+ return increment(key, 1);
+ }
+
+ /**
+ * Increment and return the id of the incremented index.
+ *
+ * @param key The key to increment
+ * @param count The number of times to increment the value
+ * @return The Id of the incremented entry.
+ */
+ public synchronized int increment(final T key, final int count) {
+ // skip hash if data array is 1 length
+ final int ix = data.length < shortCutSize ? 0 : hash(key) % data.length;
+
+ try {
+ return increment(key, ix, count);
+ }
+ catch(ArrayIndexOutOfBoundsException e) {
+ if(ix < 0)
+ return increment(key, 0, count);
+ else
+ throw new RuntimeException(e);
+ }
+ }
+
+ private final int increment(final T key, final int ix, final int count) throws ArrayIndexOutOfBoundsException {
+ final ACount l = data[ix];
+ if(l == null) {
+ data[ix] = create(key, size);
+ // never try to resize here since we use a new unused bucket.
+ return size++;
+ }
+ else {
+ final ACount v = l.inc(key, count, size);
+ if(v.id == size) {
+ size++;
+ resize();
+ return size - 1;
+ }
+ else {
+ // do not resize if not new.
+ return v.id;
+ }
+ }
+ }
+
+ public synchronized final int increment(final double key, final int count) {
+ // skip hash if data array is 1 length
+ final int ix = data.length < shortCutSize ? 0 : DCounts.hashIndex(key) % data.length;
+
+ try {
+ return increment(key, ix, count);
+ }
+ catch(ArrayIndexOutOfBoundsException e) {
+ if(ix < 0)
+ return increment(key, 0, count);
+ else
+ throw new RuntimeException(e);
+ }
+ }
+
+ private final int increment(final double key, final int ix, final int count) throws ArrayIndexOutOfBoundsException {
+ final ACount l = data[ix];
+ if(l == null) {
+ data[ix] = create(key, size);
+ // never try to resize here since we use a new unused bucket.
+ return size++;
+ }
+ else {
+ final ACount v = l.inc(key, count, size);
+ if(v.id == size) {
+ size++;
+ resize();
+ return size - 1;
+ }
+ else {
+ // do not resize if not new.
+ return v.id;
+ }
+ }
+ }
+
+ public int get(T key) {
+ return getC(key).count;
+ }
+
+ public int getId(T key) {
+ return getC(key).id;
+ }
+
+ public ACount getC(T key) {
+ final int ix = data.length < shortCutSize ? 0 : hash(key) % data.length;
+ try {
+ ACount l = data[ix];
+ return l != null ? l.get(key) : null;
+ }
+ catch(ArrayIndexOutOfBoundsException e) {
+ if(ix < 0) {
+ ACount l = data[0];
+ return l != null ? l.get(key) : null;
+ }
+ else
+ throw new RuntimeException(e);
+ }
+ }
+
+ public int getOrDefault(T key, int def) {
+ ACount e = getC(key);
+ return (e == null) ? def : e.count;
+ }
+
+ public final ACount[] extractValues() {
+ final ACount[] ret = create(size);
+ int i = 0;
+ for(ACount e : data) {
+ while(e != null) {
+ ret[i++] = e;
+ e = e.next();
+ }
+ }
+ return ret;
+ }
+
+ public T getMostFrequent() {
+ T f = null;
+ int fq = 0;
+ for(ACount e : data) {
+ while(e != null) {
+ if(e.count > fq) {
+ fq = e.count;
+ f = e.key();
+ }
+ e = e.next();
+ }
+ }
+ return f;
+ }
+
+ private void resize() {
+ if(size >= LOAD_FACTOR * data.length && size > shortCutSize)
+ // +1 to make the hash buckets better
+ resize(Math.max(data.length, shortCutSize) * RESIZE_FACTOR + 1);
+ }
+
+ private void resize(int underlying_size) {
+
+ // resize data array and copy existing contents
+ final ACount[] olddata = data;
+ data = create(underlying_size);
+
+ // rehash all entries
+ for(ACount e : olddata)
+ appendValue(e);
+ }
+
+ protected void appendValue(ACount ent) {
+ if(ent != null) {
+ // take the tail recursively first
+ appendValue(ent.next()); // append tail first
+ ent.setNext(null); // set this tail to null.
+ final int ix = hash(ent.key()) % data.length;
+ try {
+ appendValue(ent, ix);
+ }
+ catch(ArrayIndexOutOfBoundsException e) {
+ if(ix < 0)
+ appendValue(ent, 0);
+ else
+ throw new RuntimeException(e);
+ }
+ }
+ }
+
+ private void appendValue(ACount ent, int ix) {
+ ACount l = data[ix];
+ data[ix] = ent;
+ ent.setNext(l);
+ }
+
+ public void sortBuckets() {
+ if(size > 10)
+ for(int i = 0; i < data.length; i++)
+ if(data[i] != null)
+ data[i] = data[i].sort();
+ }
+
+ public void reset(int size) {
+ this.data = create(size);
+ this.size = 0;
+ }
+
+ protected abstract ACount[] create(int size);
+
+ protected abstract int hash(T key);
+
+ protected abstract ACount create(T key, int id);
+
+ protected ACount create(double key, int id) {
+ throw new NotImplementedException();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append(this.getClass().getSimpleName());
+ for(int i = 0; i < data.length; i++)
+ if(data[i] != null)
+ sb.append(", " + data[i]);
+ return sb.toString();
+ }
}
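Since increment(key, count) is now synchronized, a single count map can be shared by multiple threads. A minimal sketch, assuming the concrete DoubleCountHashMap subclass and an int-capacity constructor (both assumptions based on usage elsewhere in this diff):

	public final class ConcurrentCountSketch {
		public static void main(String[] args) throws InterruptedException {
			final DoubleCountHashMap map = new DoubleCountHashMap(16); // assumed int-capacity constructor
			final Thread[] threads = new Thread[4];
			for(int t = 0; t < threads.length; t++) {
				threads[t] = new Thread(() -> {
					for(int i = 0; i < 1000; i++)
						map.increment((double) (i % 7)); // synchronized increment, safe to share
				});
				threads[t].start();
			}
			for(Thread th : threads)
				th.join();
			System.out.println(map.size()); // 7 distinct keys regardless of interleaving
		}
	}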
diff --git a/src/main/java/org/apache/sysds/runtime/compress/utils/CompressRDDClean.java b/src/main/java/org/apache/sysds/runtime/compress/utils/CompressRDDClean.java
index 0355722ff7b..5b2e9ab3826 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/utils/CompressRDDClean.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/utils/CompressRDDClean.java
@@ -17,23 +17,21 @@
* under the License.
*/
-
package org.apache.sysds.runtime.compress.utils;
-
import org.apache.spark.api.java.function.Function;
import org.apache.sysds.runtime.compress.CompressedMatrixBlock;
import org.apache.sysds.runtime.matrix.data.MatrixBlock;
public class CompressRDDClean implements Function<MatrixBlock, MatrixBlock> {
-
+
private static final long serialVersionUID = -704403012606821854L;
@Override
public MatrixBlock call(MatrixBlock mb) throws Exception {
-
- if(mb instanceof CompressedMatrixBlock){
- CompressedMatrixBlock cmb = (CompressedMatrixBlock)mb;
+
+ if(mb instanceof CompressedMatrixBlock) {
+ CompressedMatrixBlock cmb = (CompressedMatrixBlock) mb;
cmb.clearSoftReferenceToDecompressed();
return cmb;
}
diff --git a/src/main/java/org/apache/sysds/runtime/compress/utils/DblArrayCountHashMap.java b/src/main/java/org/apache/sysds/runtime/compress/utils/DblArrayCountHashMap.java
index 059834e965a..cf8771d83aa 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/utils/DblArrayCountHashMap.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/utils/DblArrayCountHashMap.java
@@ -35,7 +35,7 @@ protected final DArrCounts[] create(int size) {
return new DArrCounts[size];
}
- protected int hash(DblArray key) {
+ protected final int hash(DblArray key) {
return Math.abs(key.hashCode());
}
@@ -44,13 +44,12 @@ protected final DArrCounts create(DblArray key, int id) {
}
@Override
- public DblArrayCountHashMap clone() {
+ public DblArrayCountHashMap clone() {
DblArrayCountHashMap ret = new DblArrayCountHashMap(size);
-
- for(ACount e : data)
- ret.appendValue(e);
+ for(ACount e : data)
+ ret.appendValue(e);
ret.size = size;
return ret;
- }
+ }
}
diff --git a/src/main/java/org/apache/sysds/runtime/compress/utils/DblArrayIntListHashMap.java b/src/main/java/org/apache/sysds/runtime/compress/utils/DblArrayIntListHashMap.java
index 55fbf90b464..b26e41f648c 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/utils/DblArrayIntListHashMap.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/utils/DblArrayIntListHashMap.java
@@ -20,7 +20,6 @@
package org.apache.sysds.runtime.compress.utils;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
import org.apache.commons.logging.Log;
@@ -35,12 +34,10 @@ public class DblArrayIntListHashMap {
protected static final int INIT_CAPACITY = 8;
protected static final int RESIZE_FACTOR = 2;
- protected static final float LOAD_FACTOR = 0.5f;
- public static int hashMissCount = 0;
+ protected static final float LOAD_FACTOR = 0.8f;
- protected int _size = -1;
-
- protected DArrayIListEntry[] _data = null;
+ protected int _size;
+ protected DArrayIListEntry[] _data;
public DblArrayIntListHashMap() {
_data = new DArrayIListEntry[INIT_CAPACITY];
@@ -57,64 +54,21 @@ public int size() {
}
public IntArrayList get(DblArray key) {
- // probe for early abort
- if(_size == 0)
- return null;
- // compute entry index position
- int hash = key.hashCode();
- int ix = indexFor(hash, _data.length);
-
- // find entry
-
- while(_data[ix] != null && !_data[ix].keyEquals(key)) {
- hash = Integer.hashCode(hash + 1); // hash of hash
- ix = indexFor(hash, _data.length);
- hashMissCount++;
- }
- DArrayIListEntry e = _data[ix];
- if(e != null)
- return e.value;
- return null;
- }
-
- private void appendValue(DblArray key, IntArrayList value) {
- // compute entry index position
- int hash = key.hashCode();
- int ix = indexFor(hash, _data.length);
-
- // add new table entry (constant time)
- while(_data[ix] != null && !_data[ix].keyEquals(key)) {
- hash = Integer.hashCode(hash + 1); // hash of hash
- ix = indexFor(hash, _data.length);
- hashMissCount++;
- }
- _data[ix] = new DArrayIListEntry(key, value);
- _size++;
+ final int hash = key.hashCode();
+ final int ix = indexFor(hash, _data.length);
+ return _data[ix] == null ? null : _data[ix].get(key);
}
public void appendValue(DblArray key, int value) {
int hash = key.hashCode();
int ix = indexFor(hash, _data.length);
-
- while(_data[ix] != null && !_data[ix].keyEquals(key)) {
- hash = Integer.hashCode(hash + 1); // hash of hash
- ix = indexFor(hash, _data.length);
- hashMissCount++;
- }
-
- DArrayIListEntry e = _data[ix];
- if(e == null) {
- final IntArrayList lstPtr = new IntArrayList();
- lstPtr.appendValue(value);
- _data[ix] = new DArrayIListEntry(new DblArray(key), lstPtr);
+ if(_data[ix] == null) {
+ _data[ix] = new DArrayIListEntry(new DblArray(key), value);
_size++;
}
- else {
- final IntArrayList lstPtr = e.value;
- lstPtr.appendValue(value);
- }
+ else if(_data[ix].add(key, value))
+ _size++;
- // resize if necessary
if(_size >= LOAD_FACTOR * _data.length)
resize();
}
@@ -122,18 +76,17 @@ public void appendValue(DblArray key, int value) {
public List<DArrayIListEntry> extractValues() {
List<DArrayIListEntry> ret = new ArrayList<>();
- for(DArrayIListEntry e : _data)
- if(e != null)
+ for(DArrayIListEntry e : _data) {
+ while(e != null) {
ret.add(e);
+ e = e.next;
+ }
+ }
- // Collections.sort(ret);
return ret;
}
private void resize() {
- // check for integer overflow on resize
- if(_data.length > Integer.MAX_VALUE / RESIZE_FACTOR)
- return;
// resize data array and copy existing contents
DArrayIListEntry[] olddata = _data;
@@ -141,66 +94,112 @@ private void resize() {
_size = 0;
// rehash all entries
- for(DArrayIListEntry e : olddata)
- if(e != null)
- appendValue(e.key, e.value);
- }
-
- public void reset() {
- Arrays.fill(_data, null);
- _size = 0;
+ for(DArrayIListEntry e : olddata) {
+ while(e != null) {
+ reinsert(e.key, e.value);
+ e = e.next;
+ }
+ }
}
- public void reset(int size) {
- int newSize = Util.getPow2(size);
- if(newSize > _data.length) {
- _data = new DArrayIListEntry[newSize];
+ private void reinsert(DblArray key, IntArrayList value) {
+ // compute entry index position
+ int hash = key.hashCode();
+ int ix = indexFor(hash, _data.length);
+ if(_data[ix] == null) {
+ _data[ix] = new DArrayIListEntry(key, value);
+ _size++;
}
else {
- Arrays.fill(_data, null);
- // only allocate new if the size is smaller than 2x
- if(size < _data.length / 2)
- _data = new DArrayIListEntry[newSize];
+ _data[ix].reinsert(key, value);
+ _size++;
}
- _size = 0;
}
- protected static int indexFor(int h, int length) {
+ private static int indexFor(int h, int length) {
return h & (length - 1);
}
public static class DArrayIListEntry {
- public DblArray key;
- public IntArrayList value;
+ public final DblArray key;
+ public final IntArrayList value;
+ private DArrayIListEntry next;
+
+ private DArrayIListEntry(DblArray key, int value) {
+ this.key = key;
+ this.value = new IntArrayList();
+ this.value.appendValue(value);
+ next = null;
+ }
+
+ private DArrayIListEntry(DblArray key, IntArrayList value) {
+ this.key = key;
+ this.value = value;
+ next = null;
+ }
+
+ private final boolean reinsert(final DblArray key, final IntArrayList value) {
+ DArrayIListEntry e = this;
+ while(e.next != null)
+ e = e.next;
+
+ e.next = new DArrayIListEntry(key, value);
+ return true;
+ }
- public DArrayIListEntry(DblArray ekey, IntArrayList evalue) {
- key = ekey;
- value = evalue;
+ private final boolean add(final DblArray key, final int value) {
+ DArrayIListEntry e = this;
+ if(e.key.equals(key)) {
+ this.value.appendValue(value);
+ return false;
+ }
+ while(e.next != null) {
+ e = e.next;
+ if(e.key.equals(key)) {
+ e.value.appendValue(value);
+ return false;
+ }
+ }
+ e.next = new DArrayIListEntry(new DblArray(key), new IntArrayList());
+ e.next.value.appendValue(value);
+ return true;
}
- @Override
- public String toString() {
- return key + ":" + value;
+ private IntArrayList get(DblArray key) {
+ DArrayIListEntry e = this;
+ boolean eq = e.key.equals(key);
+ while(e.next != null && !eq) {
+ e = e.next;
+ eq = e.key.equals(key);
+ }
+ return eq ? e.value : null;
}
- public boolean keyEquals(DblArray keyThat) {
- return key.equals(keyThat);
+ private void toString(StringBuilder sb) {
+ DArrayIListEntry e = this;
+ while(e != null) {
+ sb.append(e.key);
+ sb.append(":");
+ sb.append(e.value);
+ if(e.next != null)
+ sb.append(" -> ");
+ e = e.next;
+ }
}
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
- sb.append(this.getClass().getSimpleName() + this.hashCode());
+ sb.append(this.getClass().getSimpleName());
sb.append(" " + _size);
for(int i = 0; i < _data.length; i++) {
DArrayIListEntry ent = _data[i];
if(ent != null) {
sb.append("\n");
- sb.append("id:" + i);
sb.append("[");
- sb.append(ent);
+ ent.toString(sb);
sb.append("]");
}
}
diff --git a/src/main/java/org/apache/sysds/runtime/compress/utils/DoubleCountHashMap.java b/src/main/java/org/apache/sysds/runtime/compress/utils/DoubleCountHashMap.java
index ba85964bc17..664d8485cfa 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/utils/DoubleCountHashMap.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/utils/DoubleCountHashMap.java
@@ -48,18 +48,38 @@ protected final DCounts create(Double key, int id) {
}
public double[] getDictionary() {
+ return getDictionary(size);
+ }
+
+
+ public double[] getDictionary(int size) {
double[] ret = new double[size];
for(int i = 0; i < data.length; i++) {
ACount e = data[i];
while(e != null) {
- ret[e.id] = e.key();
+ if(e.id >= 0)
+ ret[e.id] = e.key();
e = e.next();
}
}
-
return ret;
}
+
+ public void replaceWithUIDs(double v) {
+ int i = 0;
+ for(ACount e : data) {
+ while(e != null) {
+ if(!e.key().equals(v))
+ e.id = i++;
+ else
+ e.id = -1;
+ e = e.next();
+ }
+ }
+
+ }
+
public void replaceWithUIDsNoZero() {
int i = 0;
Double z = Double.valueOf(0.0);
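The new replaceWithUIDs(v) re-enumerates all entries except the designated value v (which gets id -1), and getDictionary(int) then skips negative ids. A hedged usage sketch, assuming an int-capacity constructor and exactly one entry equal to the excluded value:

	DoubleCountHashMap m = new DoubleCountHashMap(8); // assumed constructor
	m.increment(1.0);
	m.increment(2.0);
	m.increment(0.0);
	m.replaceWithUIDs(0.0);                         // 0.0 gets id -1, others get 0..n-1
	double[] dict = m.getDictionary(m.size() - 1);  // dictionary without the excluded value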
diff --git a/src/main/java/org/apache/sysds/runtime/compress/utils/IntArrayList.java b/src/main/java/org/apache/sysds/runtime/compress/utils/IntArrayList.java
index b787c2ee1d6..1edd33a74bc 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/utils/IntArrayList.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/utils/IntArrayList.java
@@ -52,7 +52,7 @@ public int size() {
public void appendValue(int value) {
// allocate or resize array if necessary
- if(_size + 1 >= _data.length)
+ if(_size + 1 > _data.length)
resize();
// append value
@@ -60,6 +60,14 @@ public void appendValue(int value) {
_size++;
}
+ public void appendValue(IntArrayList value) {
+ // allocate or resize array if necessary
+ if(_size + value._size >= _data.length)
+ _data = Arrays.copyOf(_data, _size + value._size);
+ System.arraycopy(value._data, 0, _data, _size, value._size);
+ _size = _size + value._size;
+ }
+
/**
* Returns the underlying array of offsets. Note that this array might be physically larger than the actual length of
* the offset lists. Use size() to obtain the actual length.
@@ -75,20 +83,22 @@ public int get(int index) {
}
public int[] extractValues(boolean trim) {
- int[] ret = extractValues();
- return (trim && _size < ret.length) ? Arrays.copyOfRange(ret, 0, _size) : ret;
+ if(trim) {
+ if(_data.length == _size)
+ return _data;
+ return Arrays.copyOfRange(_data, 0, _size);
+ }
+ else
+ return _data;
}
private void resize() {
- // check for integer overflow on resize
- if(_data.length > Integer.MAX_VALUE / RESIZE_FACTOR)
- throw new RuntimeException("IntArrayList resize leads to integer overflow: size=" + _size);
// resize data array and copy existing contents
_data = Arrays.copyOf(_data, _data.length * RESIZE_FACTOR);
}
- public void reset(){
+ public void reset() {
_size = 0;
}
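The new appendValue(IntArrayList) performs one resize plus one arraycopy instead of element-wise appends. A small usage sketch (the no-arg constructor appears elsewhere in this diff):

	IntArrayList a = new IntArrayList();
	for(int i = 0; i < 5; i++)
		a.appendValue(i);
	IntArrayList b = new IntArrayList();
	for(int i = 100; i < 103; i++)
		b.appendValue(i);
	a.appendValue(b);                      // bulk append of all of b's values
	int[] offsets = a.extractValues(true); // trimmed to the 8 appended values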
diff --git a/src/main/java/org/apache/sysds/runtime/compress/workload/WorkloadAnalyzer.java b/src/main/java/org/apache/sysds/runtime/compress/workload/WorkloadAnalyzer.java
index 68b60438fa9..a4c15b2b533 100644
--- a/src/main/java/org/apache/sysds/runtime/compress/workload/WorkloadAnalyzer.java
+++ b/src/main/java/org/apache/sysds/runtime/compress/workload/WorkloadAnalyzer.java
@@ -60,7 +60,6 @@
import org.apache.sysds.parser.StatementBlock;
import org.apache.sysds.parser.WhileStatement;
import org.apache.sysds.parser.WhileStatementBlock;
-import org.apache.sysds.runtime.compress.DMLCompressionException;
import org.apache.sysds.runtime.compress.workload.AWTreeNode.WTNodeType;
import org.apache.sysds.utils.Explain;
@@ -68,7 +67,7 @@ public class WorkloadAnalyzer {
private static final Log LOG = LogFactory.getLog(WorkloadAnalyzer.class.getName());
// indicator for more aggressive compression of intermediates
public static boolean ALLOW_INTERMEDIATE_CANDIDATES = false;
- // avoid wtree construction for assumptionly already compressed intermediates
+ // avoid w-tree construction for already compressed intermediates
// (due to conditional control flow this might miss compression opportunities)
public static boolean PRUNE_COMPRESSED_INTERMEDIATES = true;
@@ -96,6 +95,7 @@ public static Map getAllCandidateWorkloads(DMLProgram prog) {
// construct workload tree for candidate
WorkloadAnalyzer wa = new WorkloadAnalyzer(prog);
WTreeRoot tree = wa.createWorkloadTree(cand);
+
map.put(cand.getHopID(), tree);
allWAs.add(wa);
}
@@ -337,6 +337,7 @@ private void createWorkloadTree(Hop hop, DMLProgram prog, AWTreeNode parent, Set
}
private void createOp(Hop hop, AWTreeNode parent) {
+
if(hop.getDataType().isMatrix()) {
Op o = null;
if(HopRewriteUtils.isData(hop, OpOpData.PERSISTENTREAD, OpOpData.TRANSIENTREAD))
@@ -425,7 +426,11 @@ else if(HopRewriteUtils.isBinary(hop, OpOp2.RBIND)) {
o.setOverlapping();
}
else if(ol) {
- treeLookup.get(in.get(0).getHopID()).setDecompressing();
+ if(in.get(0) != null) {
+ Op oo = treeLookup.get(in.get(0).getHopID());
+ if(oo != null)
+ oo.setDecompressing();
+ }
return;
}
else {
@@ -500,16 +505,15 @@ else if(isCompressed(o2)) {
setDecompressionOnAllInputs(hop, parent);
}
}
- else if(hop instanceof ParameterizedBuiltinOp) {
+ else if(hop instanceof ParameterizedBuiltinOp || hop instanceof NaryOp) {
setDecompressionOnAllInputs(hop, parent);
return;
}
- else if(hop instanceof NaryOp){
+ else {
+ LOG.warn("Unknown Hop:" + hop.getClass().getSimpleName() + "\n" + Explain.explain(hop));
setDecompressionOnAllInputs(hop, parent);
return;
}
- else
- throw new DMLCompressionException("Unknown Hop:" +hop.getClass().getSimpleName() +"\n" + Explain.explain(hop));
o = o != null ? o : new OpNormal(hop, RewriteCompressedReblock.satisfiesSizeConstraintsForCompression(hop));
treeLookup.put(hop.getHopID(), o);
diff --git a/src/main/java/org/apache/sysds/runtime/controlprogram/context/SparkExecutionContext.java b/src/main/java/org/apache/sysds/runtime/controlprogram/context/SparkExecutionContext.java
index 14eff0df5da..687377345cc 100644
--- a/src/main/java/org/apache/sysds/runtime/controlprogram/context/SparkExecutionContext.java
+++ b/src/main/java/org/apache/sysds/runtime/controlprogram/context/SparkExecutionContext.java
@@ -1769,7 +1769,7 @@ public static long getStorageSpaceUsed() {
*
* @return spark cluster configuration
*/
- public static SparkClusterConfig getSparkClusterConfig() {
+ public synchronized static SparkClusterConfig getSparkClusterConfig() {
//lazy creation of spark cluster config
if( _sconf == null )
_sconf = new SparkClusterConfig();
@@ -1782,8 +1782,7 @@ public static SparkClusterConfig getSparkClusterConfig() {
* @return broadcast memory budget
*/
public static double getBroadcastMemoryBudget() {
- return getSparkClusterConfig()
- .getBroadcastMemoryBudget();
+ return getSparkClusterConfig().getBroadcastMemoryBudget();
}
/**
diff --git a/src/main/java/org/apache/sysds/runtime/data/SparseBlockFactory.java b/src/main/java/org/apache/sysds/runtime/data/SparseBlockFactory.java
index 66f07ab6ada..6b04cf6d71d 100644
--- a/src/main/java/org/apache/sysds/runtime/data/SparseBlockFactory.java
+++ b/src/main/java/org/apache/sysds/runtime/data/SparseBlockFactory.java
@@ -19,10 +19,16 @@
package org.apache.sysds.runtime.data;
+import java.util.Arrays;
+
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.sysds.runtime.matrix.data.MatrixBlock;
-public abstract class SparseBlockFactory
-{
+public abstract class SparseBlockFactory{
+ protected static final Log LOG = LogFactory.getLog(SparseBlockFactory.class.getName());
+
public static SparseBlock createSparseBlock(int rlen) {
return createSparseBlock(MatrixBlock.DEFAULT_SPARSEBLOCK, rlen);
@@ -117,4 +123,39 @@ public static SparseBlock createIdentityMatrixWithEmptyRow(int nRowCol){
rowPtr[nRowCol+1] = nRowCol;
return new SparseBlockCSR(rowPtr, colIdx, vals, nnz);
}
+
+ /**
+ * Create a sparse block from an array. Note that the nnz count should be absolutely correct for this call to work.
+ *
+ * @param valsDense a double array of values linearized.
+ * @param nCol The number of columns in each row.
+ * @param nnz The number of non-zero values.
+ * @return A sparse block.
+ */
+ public static SparseBlock createFromArray(final double[] valsDense, final int nCol, final int nnz) {
+ final int nRow = valsDense.length / nCol;
+ if(nnz > 0) {
+
+ final int[] rowPtr = new int[nRow + 1];
+ final int[] colIdx = new int[nnz];
+ final double[] valsSparse = new double[nnz];
+ int off = 0;
+ for(int i = 0; i < valsDense.length; i++) {
+ final int mod = i % nCol;
+ if(mod == 0)
+ rowPtr[i / nCol] = off;
+ if(valsDense[i] != 0) {
+ valsSparse[off] = valsDense[i];
+ colIdx[off] = mod;
+ off++;
+ }
+ }
+ rowPtr[rowPtr.length -1] = off;
+
+ return new SparseBlockCSR(rowPtr, colIdx, valsSparse, nnz);
+ }
+ else {
+ return new SparseBlockMCSR(nRow); // empty MCSR block
+ }
+ }
}
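A short usage sketch of the new createFromArray helper; as the javadoc notes, the nnz argument must match the actual number of non-zeros in the dense input:

	// 2 x 3 dense input in row-major order with 3 non-zeros
	double[] dense = new double[] {1.0, 0.0, 2.0, 0.0, 0.0, 3.0};
	SparseBlock sb = SparseBlockFactory.createFromArray(dense, 3, 3);
	// sb is a CSR block: row 0 -> {0:1.0, 2:2.0}, row 1 -> {2:3.0}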
diff --git a/src/main/java/org/apache/sysds/runtime/frame/data/FrameBlock.java b/src/main/java/org/apache/sysds/runtime/frame/data/FrameBlock.java
index 737922397e5..3efafbb30bc 100644
--- a/src/main/java/org/apache/sysds/runtime/frame/data/FrameBlock.java
+++ b/src/main/java/org/apache/sysds/runtime/frame/data/FrameBlock.java
@@ -737,15 +737,16 @@ public ValueType getColumnType(int c) {
}
public Array<?> getColumn(int c) {
- return _coldata[c];
+ return _coldata != null ? _coldata[c] : null;
}
public void setColumn(int c, Array<?> column) {
if(_coldata == null) {
_coldata = new Array[getNumColumns()];
- _nRow = column.size();
+ if(column != null)
+ _nRow = column.size();
}
- if(column.size() != _nRow)
+ else if(column != null && column.size() != _nRow)
throw new DMLRuntimeException("Invalid number of rows in set column");
_coldata[c] = column;
_msize = -1;
@@ -814,14 +815,30 @@ public void readFields(DataInput in) throws IOException {
@Override
public void writeExternal(ObjectOutput out) throws IOException {
- // redirect serialization to writable impl
- write(out);
+
+ // if((out instanceof ObjectOutputStream)){
+ // ObjectOutputStream oos = (ObjectOutputStream)out;
+ // FastBufferedDataOutputStream fos = new FastBufferedDataOutputStream(oos);
+ // write(fos); //note: cannot close fos as this would close oos
+ // fos.flush();
+ // }
+ // else{
+ write(out);
+ // }
}
@Override
public void readExternal(ObjectInput in) throws IOException {
- // redirect deserialization to writable impl
- readFields(in);
+ // if(in instanceof ObjectInputStream) {
+ // // fast deserialize of dense/sparse blocks
+ // ObjectInputStream ois = (ObjectInputStream) in;
+ // FastBufferedDataInputStream fis = new FastBufferedDataInputStream(ois);
+ // readFields(fis); // note: cannot close fos as this would close oos
+ // }
+ // else {
+ // redirect deserialization to writable impl
+ readFields(in);
+ // }
}
@Override
diff --git a/src/main/java/org/apache/sysds/runtime/frame/data/columns/Array.java b/src/main/java/org/apache/sysds/runtime/frame/data/columns/Array.java
index 7f6698ef18c..11accc814bf 100644
--- a/src/main/java/org/apache/sysds/runtime/frame/data/columns/Array.java
+++ b/src/main/java/org/apache/sysds/runtime/frame/data/columns/Array.java
@@ -42,8 +42,6 @@
*/
public abstract class Array<T> implements Writable {
protected static final Log LOG = LogFactory.getLog(Array.class.getName());
- /** internal configuration */
- private static final boolean REUSE_RECODE_MAPS = true;
/** A soft reference to a memorization of this arrays mapping, used in transformEncode */
protected SoftReference