public class HashOuterJoin extends SparkPlan implements BinaryNode, scala.Product, scala.Serializable
| Constructor and Description |
| --- |
| `HashOuterJoin(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys, org.apache.spark.sql.catalyst.plans.JoinType joinType, scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> condition, SparkPlan left, SparkPlan right)` |
| Modifier and Type | Method and Description |
| --- | --- |
| `scala.Option<org.apache.spark.sql.catalyst.expressions.Expression>` | `condition()` |
| `RDD<org.apache.spark.sql.catalyst.expressions.Row>` | `execute()` Runs this query, returning the result as an RDD (see the sketch after this table). |
| `org.apache.spark.sql.catalyst.plans.JoinType` | `joinType()` |
| `SparkPlan` | `left()` |
| `scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>` | `leftKeys()` |
| `scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute>` | `output()` |
| `org.apache.spark.sql.catalyst.plans.physical.Partitioning` | `outputPartitioning()` Specifies how data is partitioned across different nodes in the cluster. |
| `scala.collection.immutable.List<org.apache.spark.sql.catalyst.plans.physical.ClusteredDistribution>` | `requiredChildDistribution()` |
| `SparkPlan` | `right()` |
| `scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression>` | `rightKeys()` |
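For orientation, a minimal sketch of driving the operator directly. It assumes a fully constructed `HashOuterJoin` named `join` (one way to build it is sketched under the constructor detail below); only `execute()` comes from this class, the rest is standard RDD usage.

```scala
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.expressions.Row

// `join` is assumed to be a fully constructed HashOuterJoin whose child
// plans are executable (normally the query planner guarantees this).
val rows: RDD[Row] = join.execute()

// Each element is a catalyst Row; for an outer join, rows with no match
// on the other side are padded with nulls.
rows.take(5).foreach(println)
```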
Methods inherited from class SparkPlan:
codegenEnabled, executeCollect, makeCopy

Methods inherited from class org.apache.spark.sql.catalyst.plans.QueryPlan:
expressions, inputSet, missingInput, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionDown$1, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionUp$1, outputSet, printSchema, references, schema, schemaString, simpleString, statePrefix, transformAllExpressions, transformExpressions, transformExpressionsDown, transformExpressionsUp

Methods inherited from class org.apache.spark.sql.catalyst.trees.TreeNode:
apply, argString, asCode, children, collect, fastEquals, flatMap, foreach, generateTreeString, getNodeNumbered, map, mapChildren, nodeName, numberedTreeString, otherCopyArgs, stringArgs, toString, transform, transformChildrenDown, transformChildrenUp, transformDown, transformUp, treeString, withNewChildren

Methods inherited from interface scala.Product:
productArity, productElement, productIterator, productPrefix

Methods inherited from interface org.apache.spark.Logging:
initializeIfNecessary, initializeLogging, isTraceEnabled, log_, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning
public HashOuterJoin(scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys, scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys, org.apache.spark.sql.catalyst.plans.JoinType joinType, scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> condition, SparkPlan left, SparkPlan right)
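As a rough illustration of the constructor arguments, here is a hedged sketch of building the node by hand for a query shaped like `a LEFT OUTER JOIN b ON a.id = b.id`. The attribute names, the `IntegerType` import path (it moved between Spark 1.x releases), and the placeholder child plans are all assumptions; in practice the query planner constructs this node.

```scala
import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.catalyst.plans.LeftOuter
import org.apache.spark.sql.catalyst.types.IntegerType // path varies by Spark 1.x version

// Stand-ins for already-planned child SparkPlans (assumed, not built here).
val leftPlan: SparkPlan  = ???
val rightPlan: SparkPlan = ???

// Hypothetical join-key attributes on each side.
val leftId  = AttributeReference("id", IntegerType, nullable = true)()
val rightId = AttributeReference("id", IntegerType, nullable = true)()

val join = HashOuterJoin(
  leftKeys  = Seq(leftId),   // equi-join keys from the left child
  rightKeys = Seq(rightId),  // matching keys from the right child
  joinType  = LeftOuter,     // outer join variant to evaluate
  condition = None,          // optional extra non-equi predicate
  left      = leftPlan,
  right     = rightPlan)
```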
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> leftKeys()
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Expression> rightKeys()
public org.apache.spark.sql.catalyst.plans.JoinType joinType()
public scala.Option<org.apache.spark.sql.catalyst.expressions.Expression> condition()
public SparkPlan left()
Specified by: left in interface org.apache.spark.sql.catalyst.trees.BinaryNode<SparkPlan>
public SparkPlan right()
Specified by: right in interface org.apache.spark.sql.catalyst.trees.BinaryNode<SparkPlan>
public org.apache.spark.sql.catalyst.plans.physical.Partitioning outputPartitioning()
Specifies how data is partitioned across different nodes in the cluster.
Overrides: outputPartitioning in class SparkPlan
public scala.collection.immutable.List<org.apache.spark.sql.catalyst.plans.physical.ClusteredDistribution> requiredChildDistribution()
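The returned list pairs one required distribution with each child, left first. Below is a hedged sketch of how an exchange-planning rule could consume this contract; the loop is illustrative and not Spark's actual rule, though `Partitioning.satisfies` is the real check the planner relies on.

```scala
// Illustrative only: pair each child with its required distribution and
// check whether the child's current partitioning already satisfies it.
// Spark's exchange rule inserts a shuffle where this check fails, so that
// matching keys from both sides land in the same partition.
join.requiredChildDistribution.zip(join.children).foreach {
  case (required, child) =>
    if (!child.outputPartitioning.satisfies(required)) {
      // the planner would repartition `child` by its join keys here
    }
}
```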
public scala.collection.Seq<org.apache.spark.sql.catalyst.expressions.Attribute> output()
Overrides: output in class org.apache.spark.sql.catalyst.plans.QueryPlan<SparkPlan>
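Finally, a hedged end-to-end sketch of the kind of query this operator implements. The `SQLContext` named `sqlContext`, the registered tables `a` and `b`, and their columns are all assumptions; whether the planner actually picks `HashOuterJoin` depends on the query and the Spark version.

```scala
// An outer equi-join is the shape this operator implements.
val result = sqlContext.sql(
  "SELECT a.id, b.value FROM a LEFT OUTER JOIN b ON a.id = b.id")

// Inspect the physical plan; a HashOuterJoin node appears when chosen.
println(result.queryExecution.executedPlan)
```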