SparkDataFrame-class
groupedData()
agg()
summarize()
arrange()
orderBy(<SparkDataFrame>,<characterOrColumn>)
approxQuantile(<SparkDataFrame>,<character>,<numeric>,<numeric>)
as.data.frame()
attach(<SparkDataFrame>)
broadcast()
cache()
cacheTable()
checkpoint()
collect()
coltypes()
`coltypes<-`()
colnames()
`colnames<-`()
columns()
names(<SparkDataFrame>)
`names<-`(<SparkDataFrame>)
count()
n()
createDataFrame()
as.DataFrame()
createExternalTable()
createOrReplaceTempView()
createTable()
crossJoin(<SparkDataFrame>,<SparkDataFrame>)
crosstab(<SparkDataFrame>,<character>,<character>)
cube()
describe()
distinct()
unique(<SparkDataFrame>)
dim(<SparkDataFrame>)
drop()
dropDuplicates()
dropna()
na.omit()
fillna()
dtypes()
except()
exceptAll()
explain()
filter()
where()
getNumPartitions(<SparkDataFrame>)
group_by()
groupBy()
head(<SparkDataFrame>)
hint()
histogram(<SparkDataFrame>,<characterOrColumn>)
insertInto()
intersect()
intersectAll()
isLocal()
isStreaming()
join(<SparkDataFrame>,<SparkDataFrame>)
limit()
localCheckpoint()
merge()
mutate()
transform()
ncol(<SparkDataFrame>)
count(<SparkDataFrame>)
nrow(<SparkDataFrame>)
orderBy()
persist()
pivot(<GroupedData>,<character>)
printSchema()
randomSplit()
rbind()
rename()
withColumnRenamed()
registerTempTable()
repartition()
repartitionByRange()
rollup()
sample()
sample_frac()
sampleBy()
saveAsTable()
schema()
select()
`$`(<SparkDataFrame>)
`$<-`(<SparkDataFrame>)
selectExpr()
show(<Column>)
show(<GroupedData>)
show(<SparkDataFrame>)
show(<WindowSpec>)
show(<StreamingQuery>)
showDF()
str(<SparkDataFrame>)
storageLevel(<SparkDataFrame>)
subset()
`[[`(<SparkDataFrame>,<numericOrcharacter>)
`[[<-`(<SparkDataFrame>,<numericOrcharacter>)
`[`(<SparkDataFrame>)
summary()
take()
tableToDF()
toJSON(<SparkDataFrame>)
union()
unionAll()
unionByName()
unpersist()
unpivot()
melt(<SparkDataFrame>,<ANY>,<ANY>,<character>,<character>)
with()
withColumn()
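As a minimal sketch of how several of the operations above compose (assuming SparkR is installed and a local session can be started; faithful is R's built-in dataset):
  library(SparkR)
  sparkR.session()                        # start (or attach to) a Spark session
  df <- createDataFrame(faithful)         # local data.frame -> SparkDataFrame
  head(select(df, df$eruptions))          # project a single column
  head(filter(df, df$waiting > 70))       # keep rows with waiting > 70
  head(summarize(groupBy(df, df$waiting),
                 count = n(df$waiting)))  # group and count
  collect(limit(df, 5))                   # bring a few rows back as an R data.frame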
read.df()
loadDF()
read.jdbc()
read.json()
read.orc()
read.parquet()
read.text()
write.df()
saveDF()
write.jdbc()
write.json()
write.orc()
write.parquet()
write.text()
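A short sketch of the generic reader/writer pair above; the file paths are placeholders:
  # read a JSON file into a SparkDataFrame (path is a placeholder)
  people <- read.df("path/to/people.json", source = "json")
  # equivalently: people <- read.json("path/to/people.json")
  printSchema(people)
  # write it back out as Parquet, overwriting any existing output
  write.df(people, path = "path/to/people.parquet", source = "parquet", mode = "overwrite")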
approx_count_distinct()
approxCountDistinct()
collect_list()
collect_set()
count_distinct()
countDistinct()
grouping_bit()
grouping_id()
kurtosis()
max_by()
min_by()
n_distinct()
percentile_approx()
product()
sd()
skewness()
stddev()
std()
stddev_pop()
stddev_samp()
sum_distinct()
sumDistinct()
var()
variance()
var_pop()
var_samp()
max(<Column>)
mean(<Column>)
min(<Column>)
sum(<Column>)
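These aggregate functions are typically used inside agg()/summarize() on grouped data; a small sketch on the faithful dataset:
  df <- createDataFrame(faithful)
  head(agg(groupBy(df, "waiting"),
           mean_eruptions = mean(df$eruptions),        # per-group mean
           sd_eruptions   = stddev(df$eruptions),      # per-group standard deviation
           n_eruptions    = count_distinct(df$eruptions)))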
from_avro()
to_avro()
column_collection_functions
reverse(<Column>)
to_json(<Column>)
to_csv(<Column>)
concat(<Column>)
from_json(<Column>,<characterOrstructTypeOrColumn>)
schema_of_json(<characterOrColumn>)
from_csv(<Column>,<characterOrstructTypeOrColumn>)
schema_of_csv(<characterOrColumn>)
array_aggregate(<characterOrColumn>,<Column>,<function>)
array_contains(<Column>)
array_distinct(<Column>)
array_except(<Column>,<Column>)
array_exists(<characterOrColumn>,<function>)
array_filter(<characterOrColumn>,<function>)
array_forall(<characterOrColumn>,<function>)
array_intersect(<Column>,<Column>)
array_join(<Column>,<character>)
array_max(<Column>)
array_min(<Column>)
array_position(<Column>)
array_remove(<Column>)
array_repeat(<Column>,<numericOrColumn>)
array_sort(<Column>)
array_transform(<characterOrColumn>,<function>)
array_transform(<characterOrColumn>,<characterOrColumn>,<function>)
arrays_overlap(<Column>,<Column>)
array_union(<Column>,<Column>)
arrays_zip(<Column>)
arrays_zip_with(<characterOrColumn>,<characterOrColumn>,<function>)
shuffle(<Column>)
flatten(<Column>)
map_concat(<Column>)
map_entries(<Column>)
map_filter(<characterOrColumn>,<function>)
map_from_arrays(<Column>,<Column>)
map_from_entries(<Column>)
map_keys(<Column>)
transform_keys(<characterOrColumn>,<function>)
transform_values(<characterOrColumn>,<function>)
map_values(<Column>)
map_zip_with(<characterOrColumn>,<characterOrColumn>,<function>)
element_at(<Column>)
explode(<Column>)
size(<Column>)
slice(<Column>)
sort_array(<Column>)
posexplode(<Column>)
explode_outer(<Column>)
posexplode_outer(<Column>)
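A brief sketch of a few of the collection functions above, building an array column with create_array() and lit() (both listed further below under the non-aggregate functions):
  df <- createDataFrame(data.frame(id = 1:2))
  df <- withColumn(df, "arr", create_array(lit(1), lit(2), lit(3)))
  head(select(df, array_contains(df$arr, 2),    # TRUE
              size(df$arr),                     # 3
              array_max(df$arr)))               # 3
  head(select(df, df$id, explode(df$arr)))      # one output row per array element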
add_months()
datediff()
date_add()
date_format()
date_sub()
from_utc_timestamp()
months_between()
next_day()
to_utc_timestamp()
bin()
bround()
cbrt()
ceil()
conv()
cot()
csc()
hex()
hypot()
ln()
pmod()
rint()
sec()
shiftLeft()
shiftleft()
shiftRight()
shiftright()
shiftRightUnsigned()
shiftrightunsigned()
signum()
degrees()
toDegrees()
radians()
toRadians()
unhex()
width_bucket()
abs(<Column>)
acos(<Column>)
acosh(<Column>)
asin(<Column>)
asinh(<Column>)
atan(<Column>)
atanh(<Column>)
ceiling(<Column>)
cos(<Column>)
cosh(<Column>)
exp(<Column>)
expm1(<Column>)
factorial(<Column>)
floor(<Column>)
log(<Column>)
log10(<Column>)
log1p(<Column>)
log2(<Column>)
round(<Column>)
sign(<Column>)
sin(<Column>)
sinh(<Column>)
sqrt(<Column>)
tan(<Column>)
tanh(<Column>)
atan2(<Column>)
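For illustration, a few of the math helpers applied to numeric columns (a sketch on faithful; lit() is listed below):
  df <- createDataFrame(faithful)
  head(select(df, bround(df$eruptions, 1),       # round to 1 decimal (HALF_EVEN)
              pmod(df$waiting, lit(10)),         # positive modulus
              hypot(df$eruptions, df$waiting)))  # sqrt(x^2 + y^2)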
assert_true()
crc32()
hash()
md5()
raise_error()
sha1()
sha2()
xxhash64()
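These hashing and checksum helpers operate on string or binary columns; a small sketch with arbitrary example data:
  df <- createDataFrame(data.frame(name = c("alice", "bob")))
  head(select(df, md5(df$name),         # MD5 digest as a hex string
              sha2(df$name, 256),       # SHA-2 family, 256-bit
              crc32(df$name),
              hash(df$name)))           # Spark's internal 32-bit hash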
array_to_vector()
vector_to_array()
when()
bitwise_not()
bitwiseNOT()
create_array()
create_map()
expr()
greatest()
input_file_name()
isnan()
least()
lit()
monotonically_increasing_id()
nanvl()
negate()
negative()
positive()
rand()
randn()
spark_partition_id()
struct()
coalesce(<Column>)
is.nan(<Column>)
ifelse(<Column>)
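when()/otherwise() and ifelse() build conditional column expressions; a short sketch:
  df <- createDataFrame(faithful)
  head(select(df,
              otherwise(when(df$waiting > 80, "long"), "short"),  # CASE WHEN ... ELSE ...
              ifelse(df$waiting > 80, "long", "short"),           # same result, ifelse-style
              lit("constant")))                                   # a literal column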
ascii()
base64()
bit_length()
collate()
collation()
concat_ws()
decode()
encode()
format_number()
format_string()
initcap()
instr()
levenshtein()
locate()
lower()
lpad()
ltrim()
octet_length()
overlay()
regexp_extract()
regexp_replace()
repeat_string()
rpad()
rtrim()
split_string()
soundex()
substring_index()
translate()
trim()
unbase64()
upper()
length(<Column>)
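A quick sketch of some of the string functions above on an arbitrary character column:
  df <- createDataFrame(data.frame(name = c("ada lovelace", "alan turing")))
  head(select(df,
              upper(df$name),
              initcap(df$name),
              regexp_extract(df$name, "(\\w+) (\\w+)", 2),  # second word
              concat_ws("-", df$name, lit("x"))))           # join with a separator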
cume_dist()
dense_rank()
lag()
lead()
nth_value()
ntile()
percent_rank()
rank()
row_number()
alias(<Column>)
alias(<SparkDataFrame>)
asc()
asc_nulls_first()
asc_nulls_last()
contains()
desc()
desc_nulls_first()
desc_nulls_last()
getField()
getItem()
isNaN()
isNull()
isNotNull()
like()
rlike()
ilike()
avg()
between()
cast()
column()
coalesce()
corr()
cov()
covar_samp()
covar_pop()
dropFields()
endsWith()
first()
last()
not()
`!`(<Column>)
otherwise()
startsWith()
substr(<Column>)
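Several of the Column methods above chain naturally inside select(); a sketch:
  df <- createDataFrame(faithful)
  head(select(df,
              alias(cast(df$waiting, "integer"), "waiting_int"),  # cast and rename
              between(df$eruptions, c(2, 4)),                     # lower/upper bounds
              isNotNull(df$eruptions)))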
current_date()
current_timestamp()
date_trunc()
dayofmonth()
dayofweek()
dayofyear()
monthname()
dayname()
from_unixtime()
hour()
last_day()
make_date()
minute()
month()
quarter()
second()
timestamp_seconds()
to_date()
to_timestamp()
unix_timestamp()
weekofyear()
window()
year()
trunc(<Column>)
withField()
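A small sketch of the date/time helpers above, parsing strings with to_date() and extracting fields:
  df <- createDataFrame(data.frame(d = c("2015-07-22", "2016-01-10")))
  df <- withColumn(df, "date", to_date(df$d, "yyyy-MM-dd"))
  head(select(df, year(df$date), month(df$date), dayofweek(df$date),
              date_format(df$date, "MMM yyyy")))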
over()
predict()
partitionBy()
rangeBetween()
rowsBetween()
windowOrderBy()
windowPartitionBy()
WindowSpec-class
`%in%`(<Column>)
`%<=>%`
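The ranking functions listed earlier (rank(), row_number(), lag(), ...) are evaluated over a WindowSpec built with windowPartitionBy()/windowOrderBy() and applied via over(); a sketch on mtcars:
  df <- createDataFrame(mtcars)
  ws <- orderBy(windowPartitionBy("am"), "hp")     # partition by am, order by hp
  head(select(df, df$am, df$hp, over(row_number(), ws)))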
MLlib is Spark's machine learning (ML) library
AFTSurvivalRegressionModel-class
ALSModel-class
BisectingKMeansModel-class
DecisionTreeClassificationModel-class
DecisionTreeRegressionModel-class
FMClassificationModel-class
FMRegressionModel-class
FPGrowthModel-class
GBTClassificationModel-class
GBTRegressionModel-class
GaussianMixtureModel-class
GeneralizedLinearRegressionModel-class
glm(<formula>,<ANY>,<SparkDataFrame>)
IsotonicRegressionModel-class
KMeansModel-class
KSTest-class
LDAModel-class
LinearRegressionModel-class
LinearSVCModel-class
LogisticRegressionModel-class
MultilayerPerceptronClassificationModel-class
NaiveBayesModel-class
PowerIterationClustering-class
PrefixSpan-class
RandomForestClassificationModel-class
RandomForestRegressionModel-class
fitted()
freqItems(<SparkDataFrame>,<character>)
spark.als()
summary(<ALSModel>)
predict(<ALSModel>)
write.ml(<ALSModel>,<character>)
spark.bisectingKmeans()
summary(<BisectingKMeansModel>)
predict(<BisectingKMeansModel>)
fitted(<BisectingKMeansModel>)
write.ml(<BisectingKMeansModel>,<character>)
spark.decisionTree()
summary(<DecisionTreeRegressionModel>)
print(<summary.DecisionTreeRegressionModel>)
summary(<DecisionTreeClassificationModel>)
print(<summary.DecisionTreeClassificationModel>)
predict(<DecisionTreeRegressionModel>)
predict(<DecisionTreeClassificationModel>)
write.ml(<DecisionTreeRegressionModel>,<character>)
write.ml(<DecisionTreeClassificationModel>,<character>)
spark.fmClassifier()
summary(<FMClassificationModel>)
predict(<FMClassificationModel>)
write.ml(<FMClassificationModel>,<character>)
spark.fmRegressor()
summary(<FMRegressionModel>)
predict(<FMRegressionModel>)
write.ml(<FMRegressionModel>,<character>)
spark.fpGrowth()
spark.freqItemsets()
spark.associationRules()
predict(<FPGrowthModel>)
write.ml(<FPGrowthModel>,<character>)
spark.gaussianMixture()
summary(<GaussianMixtureModel>)
predict(<GaussianMixtureModel>)
write.ml(<GaussianMixtureModel>,<character>)
spark.gbt()
summary(<GBTRegressionModel>)
print(<summary.GBTRegressionModel>)
summary(<GBTClassificationModel>)
print(<summary.GBTClassificationModel>)
predict(<GBTRegressionModel>)
predict(<GBTClassificationModel>)
write.ml(<GBTRegressionModel>,<character>)
write.ml(<GBTClassificationModel>,<character>)
spark.glm()
summary(<GeneralizedLinearRegressionModel>)
print(<summary.GeneralizedLinearRegressionModel>)
predict(<GeneralizedLinearRegressionModel>)
write.ml(<GeneralizedLinearRegressionModel>,<character>)
spark.isoreg()
summary(<IsotonicRegressionModel>)
predict(<IsotonicRegressionModel>)
write.ml(<IsotonicRegressionModel>,<character>)
spark.kmeans()
summary(<KMeansModel>)
predict(<KMeansModel>)
write.ml(<KMeansModel>,<character>)
spark.kstest()
summary(<KSTest>)
print(<summary.KSTest>)
spark.lda()
spark.posterior()
spark.perplexity()
summary(<LDAModel>)
write.ml(<LDAModel>,<character>)
spark.lm()
summary(<LinearRegressionModel>)
predict(<LinearRegressionModel>)
write.ml(<LinearRegressionModel>,<character>)
spark.logit()
summary(<LogisticRegressionModel>)
predict(<LogisticRegressionModel>)
write.ml(<LogisticRegressionModel>,<character>)
spark.mlp()
summary(<MultilayerPerceptronClassificationModel>)
predict(<MultilayerPerceptronClassificationModel>)
write.ml(<MultilayerPerceptronClassificationModel>,<character>)
spark.naiveBayes()
summary(<NaiveBayesModel>)
predict(<NaiveBayesModel>)
write.ml(<NaiveBayesModel>,<character>)
spark.assignClusters()
spark.findFrequentSequentialPatterns()
spark.randomForest()
summary(<RandomForestRegressionModel>)
print(<summary.RandomForestRegressionModel>)
summary(<RandomForestClassificationModel>)
print(<summary.RandomForestClassificationModel>)
predict(<RandomForestRegressionModel>)
predict(<RandomForestClassificationModel>)
write.ml(<RandomForestRegressionModel>,<character>)
write.ml(<RandomForestClassificationModel>,<character>)
spark.survreg()
summary(<AFTSurvivalRegressionModel>)
predict(<AFTSurvivalRegressionModel>)
write.ml(<AFTSurvivalRegressionModel>,<character>)
spark.svmLinear()
predict(<LinearSVCModel>)
summary(<LinearSVCModel>)
write.ml(<LinearSVCModel>,<character>)
read.ml()
write.ml()
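The MLlib wrappers above follow a common fit / summary() / predict() / write.ml() pattern; a minimal sketch with spark.glm() (dots in iris column names become underscores when copied to Spark; the save path is a placeholder):
  training <- createDataFrame(iris)     # e.g. Sepal.Length -> Sepal_Length
  model <- spark.glm(training, Sepal_Length ~ Sepal_Width + Species, family = "gaussian")
  summary(model)                        # coefficients, deviance, etc.
  preds <- predict(model, training)
  head(select(preds, "Sepal_Length", "prediction"))
  write.ml(model, "path/to/glm_model")  # persist; reload later with read.ml()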