diff --git a/atlas-eval/src/main/scala/com/netflix/atlas/eval/stream/StreamContext.scala b/atlas-eval/src/main/scala/com/netflix/atlas/eval/stream/StreamContext.scala index e3add16a3..aea49c0cf 100644 --- a/atlas-eval/src/main/scala/com/netflix/atlas/eval/stream/StreamContext.scala +++ b/atlas-eval/src/main/scala/com/netflix/atlas/eval/stream/StreamContext.scala @@ -30,7 +30,9 @@ import org.apache.pekko.stream.scaladsl.StreamConverters import org.apache.pekko.util.ByteString import com.netflix.atlas.core.model.DataExpr import com.netflix.atlas.core.model.Query +import com.netflix.atlas.core.model.StyleExpr import com.netflix.atlas.core.util.Streams +import com.netflix.atlas.eval.model.ExprType import com.netflix.atlas.eval.stream.Evaluator.DataSource import com.netflix.atlas.eval.stream.Evaluator.DataSources import com.netflix.atlas.json.JsonSupport @@ -163,14 +165,11 @@ private[stream] class StreamContext( // Check that expression is parseable and perform basic static analysis of DataExprs to // weed out expensive queries up front - val results = interpreter.eval(uri).exprs - results.foreach(_.expr.dataExprs.foreach(validateDataExpr)) - - // For hi-res streams, require more precise scoping that allows us to more efficiently - // match the data and run it only where needed. This would ideally be applied everywhere, - // but for backwards compatiblity the 1m step is opted out for now. 
- if (ds.step.toMillis < 60_000) { - results.foreach(_.expr.dataExprs.foreach(expr => restrictsNameAndApp(expr.query))) + val (exprType, exprs) = interpreter.parseQuery(uri) + if (exprType == ExprType.TIME_SERIES) { + exprs.foreach { + case e: StyleExpr => validateStyleExpr(e, ds) + } } // Check that there is a backend available for it @@ -181,6 +180,17 @@ private[stream] class StreamContext( } } + private def validateStyleExpr(styleExpr: StyleExpr, ds: DataSource): Unit = { + styleExpr.expr.dataExprs.foreach(validateDataExpr) + + // For hi-res streams, require more precise scoping that allows us to more efficiently + // match the data and run it only where needed. This would ideally be applied everywhere, + // but for backwards compatibility the 1m step is opted out for now. + if (ds.step.toMillis < 60_000) { + styleExpr.expr.dataExprs.foreach(expr => restrictsNameAndApp(expr.query)) + } + } + private def validateDataExpr(expr: DataExpr): Unit = { Query .dnfList(expr.query) diff --git a/atlas-eval/src/test/scala/com/netflix/atlas/eval/stream/EvaluatorSuite.scala b/atlas-eval/src/test/scala/com/netflix/atlas/eval/stream/EvaluatorSuite.scala index e4364c3f2..5dc45cd0e 100644 --- a/atlas-eval/src/test/scala/com/netflix/atlas/eval/stream/EvaluatorSuite.scala +++ b/atlas-eval/src/test/scala/com/netflix/atlas/eval/stream/EvaluatorSuite.scala @@ -463,11 +463,11 @@ class EvaluatorSuite extends FunSuite { assertEquals(ds.step, Duration.ofMinutes(1)) } - private def validateOk(params: String): Unit = { + private def validateOk(params: String, path: String = "graph"): Unit = { val evaluator = new Evaluator(config, registry, system) val ds = new Evaluator.DataSource( "test", - s"resource:///gc-pause.dat?$params" + s"synthetic://test/$path?$params" ) evaluator.validate(ds) } @@ -585,6 +585,25 @@ class EvaluatorSuite extends FunSuite { ) } + test("validate: events raw") { + validateOk("q=name,foo,:eq,nf.cluster,www-dev,:eq,:and", path = "events") + } + + test("validate: events 
table") { + validateOk("q=name,foo,:eq,nf.cluster,www-dev,:eq,:and,(,value,),:table", path = "events") + } + + test("validate: traces") { + validateOk("q=nf.app,www,:eq,nf.app,db,:eq,:child", path = "traces") + } + + test("validate: trace time series") { + validateOk( + "q=app,www,:eq,app,db,:eq,:child,app,db,:eq,:sum,:span-time-series", + path = "traces/graph" + ) + } + private def invalidHiResQuery(expr: String): Unit = { val evaluator = new Evaluator(config, registry, system) val ds = new Evaluator.DataSource(