forked from MD2Korg/CerebralCortex-2.0-legacy
-
Notifications
You must be signed in to change notification settings - Fork 0
/
SampleDataStoreEngineMain.py
93 lines (72 loc) · 2.19 KB
/
SampleDataStoreEngineMain.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import json
import math

from pyspark.sql.functions import *

import cerebralcortex
def main(stream_id=1992):
    """Round-trip a datastream through the CerebralCortex datastore.

    Loads the datastream identified by *stream_id* (default 1992, the
    original sample id) from the datastore and immediately saves it back,
    exercising both the read and write paths of the engine.

    Parameters
    ----------
    stream_id : int
        Identifier of the datastream to load and re-save.
    """
    # Local Spark master; app name retained from the original sample.
    CC = cerebralcortex.CerebralCortex(master="local[*]",
                                       name="Memphis cStress Development App")
    df = CC.get_datastream(stream_id)
    CC.save_datastream(df)
def computeMagnitude(sample):
    """Return the Euclidean magnitude (L2 norm) of a JSON-encoded number list.

    Parameters
    ----------
    sample : str
        JSON string encoding a list of numeric values, e.g. ``"[3, 4]"``.

    Returns
    -------
    float
        ``sqrt(sum(v**2 for v in values))``; ``0.0`` for an empty list.
    """
    data = json.loads(sample)
    # The original loop reused the loop variable as its accumulator and
    # overwrote it each iteration, so the result depended only on the last
    # element (and raised NameError on empty input). The magnitude is the
    # square root of the sum of squares over all elements.
    return math.sqrt(sum(v * v for v in data))
def dfTodp():
    """Stub: intended to convert a DataFrame to datapoints; not yet implemented.

    NOTE(review): no callers in this file and no body — presumably a planned
    helper for the commented-out DataFrame/datapoint code above; confirm
    before removing.
    """
    pass
# Script entry point: run the datastore round-trip only when executed
# directly, not when imported as a module.
if __name__ == "__main__":
    main()