write.stream {SparkR}    R Documentation
Description:

The data source is specified by the source and a set of options (...). If source is not specified, the default data source configured by spark.sql.sources.default will be used.
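For example (a minimal sketch, not part of this page's examples: sdf, the paths, and the "parquet" value are illustrative placeholders):

    sparkR.session(sparkConfig = list(spark.sql.sources.default = "parquet"))
    # With no explicit source, the sink uses the configured default format.
    q <- write.stream(sdf, path = "out-dir", checkpointLocation = "cp-dir")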
Usage:

write.stream(df, source = NULL, outputMode = NULL, ...)

## S4 method for signature 'SparkDataFrame'
write.stream(df, source = NULL, outputMode = NULL, ...)
Arguments:

df          a streaming SparkDataFrame.
source      the name of the external data source.
outputMode  one of 'append', 'complete', 'update'.
...         additional argument(s) passed to the method.
Details:

Additionally, outputMode specifies how data of a streaming SparkDataFrame is written to an output data source. There are three modes:

append: Only the new rows in the streaming SparkDataFrame will be written out. This output mode can only be used in queries that do not contain any aggregation.

complete: All the rows in the streaming SparkDataFrame will be written out every time there are some updates. This output mode can only be used in queries that contain aggregations.

update: Only the rows that were updated in the streaming SparkDataFrame will be written out every time there are some updates. If the query doesn't contain aggregations, it will be equivalent to append mode.
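To make the modes concrete, a minimal sketch (the socket source and the "value" column below are illustrative placeholders, mirroring the Examples section):

    lines <- read.stream("socket", host = "localhost", port = 9999)

    # append: no aggregation in the query; only newly arrived rows are emitted
    q1 <- write.stream(lines, "console", outputMode = "append")

    # complete: the full aggregated result is re-emitted on every trigger
    counts <- count(group_by(lines, "value"))
    q2 <- write.stream(counts, "console", outputMode = "complete")

    # update: only rows whose aggregate changed since the last trigger are emitted
    q3 <- write.stream(counts, "console", outputMode = "update")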
Note:

write.stream since 2.2.0. This method is experimental.
See Also:

Other SparkDataFrame functions: $, $,SparkDataFrame-method, $<-, $<-,SparkDataFrame-method, select, select,SparkDataFrame,Column-method, select,SparkDataFrame,character-method, select,SparkDataFrame,list-method; SparkDataFrame-class; [, [,SparkDataFrame-method, [[, [[,SparkDataFrame,numericOrcharacter-method, [[<-, [[<-,SparkDataFrame,numericOrcharacter-method, subset, subset,SparkDataFrame-method; agg, agg,GroupedData-method, agg,SparkDataFrame-method, summarize, summarize,GroupedData-method, summarize,SparkDataFrame-method; arrange, arrange,SparkDataFrame,Column-method, arrange,SparkDataFrame,character-method, orderBy,SparkDataFrame,characterOrColumn-method; as.data.frame, as.data.frame,SparkDataFrame-method; attach, attach,SparkDataFrame-method; cache, cache,SparkDataFrame-method; checkpoint, checkpoint,SparkDataFrame-method; coalesce, coalesce,Column-method, coalesce,SparkDataFrame-method; collect, collect,SparkDataFrame-method; colnames, colnames,SparkDataFrame-method, colnames<-, colnames<-,SparkDataFrame-method, columns, columns,SparkDataFrame-method, names, names,SparkDataFrame-method, names<-, names<-,SparkDataFrame-method; coltypes, coltypes,SparkDataFrame-method, coltypes<-, coltypes<-,SparkDataFrame,character-method; count,SparkDataFrame-method, nrow, nrow,SparkDataFrame-method; createOrReplaceTempView, createOrReplaceTempView,SparkDataFrame,character-method; crossJoin, crossJoin,SparkDataFrame,SparkDataFrame-method; dapplyCollect, dapplyCollect,SparkDataFrame,function-method; dapply, dapply,SparkDataFrame,function,structType-method; describe, describe,SparkDataFrame,ANY-method, describe,SparkDataFrame,character-method, describe,SparkDataFrame-method, summary, summary,SparkDataFrame-method; dim, dim,SparkDataFrame-method; distinct, distinct,SparkDataFrame-method, unique, unique,SparkDataFrame-method; dropDuplicates, dropDuplicates,SparkDataFrame-method; dropna, dropna,SparkDataFrame-method, fillna, fillna,SparkDataFrame-method, na.omit, na.omit,SparkDataFrame-method; drop, drop,ANY-method, drop,SparkDataFrame-method; dtypes, dtypes,SparkDataFrame-method; except, except,SparkDataFrame,SparkDataFrame-method; explain, explain,SparkDataFrame-method, explain,StreamingQuery-method; filter, filter,SparkDataFrame,characterOrColumn-method, where, where,SparkDataFrame,characterOrColumn-method; first, first,SparkDataFrame-method, first,characterOrColumn-method; gapplyCollect, gapplyCollect,GroupedData-method, gapplyCollect,SparkDataFrame-method; gapply, gapply,GroupedData-method, gapply,SparkDataFrame-method; getNumPartitions, getNumPartitions,SparkDataFrame-method; groupBy, groupBy,SparkDataFrame-method, group_by, group_by,SparkDataFrame-method; head, head,SparkDataFrame-method; hint, hint,SparkDataFrame,character-method; histogram, histogram,SparkDataFrame,characterOrColumn-method; insertInto, insertInto,SparkDataFrame,character-method; intersect, intersect,SparkDataFrame,SparkDataFrame-method; isLocal, isLocal,SparkDataFrame-method; isStreaming, isStreaming,SparkDataFrame-method; join, join,SparkDataFrame,SparkDataFrame-method; limit, limit,SparkDataFrame,numeric-method; merge, merge,SparkDataFrame,SparkDataFrame-method; mutate, mutate,SparkDataFrame-method, transform, transform,SparkDataFrame-method; ncol, ncol,SparkDataFrame-method; persist, persist,SparkDataFrame,character-method; printSchema, printSchema,SparkDataFrame-method; randomSplit, randomSplit,SparkDataFrame,numeric-method; rbind, rbind,SparkDataFrame-method; registerTempTable, registerTempTable,SparkDataFrame,character-method; rename, rename,SparkDataFrame-method, withColumnRenamed, withColumnRenamed,SparkDataFrame,character,character-method; repartition, repartition,SparkDataFrame-method; sample, sample,SparkDataFrame,logical,numeric-method, sample_frac, sample_frac,SparkDataFrame,logical,numeric-method; saveAsParquetFile, saveAsParquetFile,SparkDataFrame,character-method, write.parquet, write.parquet,SparkDataFrame,character-method; saveAsTable, saveAsTable,SparkDataFrame,character-method; saveDF, saveDF,SparkDataFrame,character-method, write.df, write.df,SparkDataFrame-method; schema, schema,SparkDataFrame-method; selectExpr, selectExpr,SparkDataFrame,character-method; showDF, showDF,SparkDataFrame-method; show, show,Column-method, show,GroupedData-method, show,SparkDataFrame-method, show,StreamingQuery-method, show,WindowSpec-method; storageLevel, storageLevel,SparkDataFrame-method; str, str,SparkDataFrame-method; take, take,SparkDataFrame,numeric-method; toJSON, toJSON,SparkDataFrame-method; union, union,SparkDataFrame,SparkDataFrame-method, unionAll, unionAll,SparkDataFrame,SparkDataFrame-method; unpersist, unpersist,SparkDataFrame-method; withColumn, withColumn,SparkDataFrame,character-method; with, with,SparkDataFrame-method; write.jdbc, write.jdbc,SparkDataFrame,character,character-method; write.json, write.json,SparkDataFrame,character-method; write.orc, write.orc,SparkDataFrame,character-method; write.text, write.text,SparkDataFrame,character-method
Examples:

## Not run:
##D sparkR.session()
##D df <- read.stream("socket", host = "localhost", port = 9999)
##D isStreaming(df)
##D wordCounts <- count(group_by(df, "value"))
##D
##D # console
##D q <- write.stream(wordCounts, "console", outputMode = "complete")
##D # text stream
##D q <- write.stream(df, "text", path = "/home/user/out", checkpointLocation = "/home/user/cp")
##D # memory stream
##D q <- write.stream(wordCounts, "memory", queryName = "outs", outputMode = "complete")
##D head(sql("SELECT * from outs"))
##D queryName(q)
##D
##D stopQuery(q)
## End(Not run)
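The StreamingQuery returned by write.stream can be inspected and waited on (a brief sketch continuing the example above; q is the query created there):

    status(q)                    # current status of the streaming query
    lastProgress(q)              # most recent progress update, if any
    awaitTermination(q, 10000)   # block for up to 10 seconds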