Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Reduce dace package dependencies #1715

Closed
wants to merge 12 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/pyFV3-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ jobs:
run: |
python -m pip install --upgrade pip wheel setuptools
pip install -e ./pyFV3[develop]
pip install -e ./dace
pip install -e ./dace[testing]
- name: Download data
run: |
cd pyFV3
Expand Down
5 changes: 0 additions & 5 deletions dace/cli/dacelab.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,6 @@
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.

import argparse
import numpy
import pickle
import json

import dace
from dace.frontend.octave import parse
from dace.sdfg.nodes import AccessNode

Expand Down
6 changes: 5 additions & 1 deletion dace/cli/sdfg_diff.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
import tempfile
from typing import Dict, Literal, Set, Tuple, Union

import jinja2
import dace
from dace import memlet as mlt
from dace.sdfg import nodes as nd
Expand Down Expand Up @@ -179,6 +178,11 @@ def main():
diff_sets = _sdfg_diff(sdfg_A, sdfg_B, eq_strategy)

if args.graphical:
try:
import jinja2
except (ImportError, ModuleNotFoundError):
raise ImportError('Graphical SDFG diff requires jinja2, please install by running `pip install jinja2`')

basepath = os.path.join(os.path.dirname(os.path.realpath(dace.__file__)), 'viewer')
template_loader = jinja2.FileSystemLoader(searchpath=os.path.join(basepath, 'templates'))
template_env = jinja2.Environment(loader=template_loader)
Expand Down
6 changes: 5 additions & 1 deletion dace/cli/sdfv.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@

import dace
import tempfile
import jinja2


def partialclass(cls, *args, **kwds):
Expand Down Expand Up @@ -48,6 +47,11 @@ def view(sdfg: dace.SDFG, filename: Optional[Union[str, int]] = None, verbose: b
os.close(fd)
return

try:
import jinja2
except (ImportError, ModuleNotFoundError):
raise ImportError('SDFG.view() requires jinja2, please install by running `pip install jinja2`')

if type(sdfg) is dace.SDFG:
sdfg = dace.serialize.dumps(sdfg.to_json())

Expand Down
15 changes: 12 additions & 3 deletions dace/codegen/compiled_sdfg.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,11 @@
from typing import Any, Callable, Dict, List, Tuple, Optional, Type, Union
import warnings

import numpy as np
try:
import numpy as np
except (ImportError, ModuleNotFoundError):
np = None

import sympy as sp

from dace import data as dt, dtypes, hooks, symbolic
Expand Down Expand Up @@ -189,6 +193,11 @@ def __init__(self, sdfg, lib: ReloadableDLL, argnames: List[str] = None):
self._lastargs = ()
self.do_not_execute = False

try:
import numpy
except (ImportError, ModuleNotFoundError):
raise ImportError('Calling CompiledSDFG objects requires numpy for array marshalling.')

lib.load() # Explicitly load the library
self._init = lib.get_symbol('__dace_init_{}'.format(sdfg.name))
self._init.restype = ctypes.c_void_p
Expand All @@ -199,9 +208,9 @@ def __init__(self, sdfg, lib: ReloadableDLL, argnames: List[str] = None):
# Cache SDFG return values
self._create_new_arrays: bool = True
self._return_syms: Dict[str, Any] = None
self._retarray_shapes: List[Tuple[str, np.dtype, dtypes.StorageType, Tuple[int], Tuple[int], int]] = []
self._retarray_shapes: List[Tuple[str, dtypes.StorageType, Tuple[int], Tuple[int], int]] = []
self._retarray_is_scalar: List[bool] = []
self._return_arrays: List[np.ndarray] = []
self._return_arrays: List[dt.ArrayLike] = []
self._callback_retval_references: List[Any] = [] # Avoids garbage-collecting callback return values

# Cache SDFG argument properties
Expand Down
21 changes: 10 additions & 11 deletions dace/codegen/cppunparse.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,9 +75,6 @@
import six
import sys
import ast
import numpy as np
import os
import tokenize
import warnings

import sympy
Expand Down Expand Up @@ -109,13 +106,15 @@
_py2c_nameconst = {True: "true", False: "false", None: "nullptr"}

_py2c_reserved = {"True": "true", "False": "false", "None": "nullptr", "inf": "INFINITY", "nan": "NAN"}

_py2c_typeconversion = {
"uint": dace.dtypes.typeclass(np.uint32),
"int": dace.dtypes.typeclass(int),
"float": dace.dtypes.typeclass(float),
"float64": dace.dtypes.typeclass(np.float64),
"str": dace.dtypes.pointer(dace.dtypes.int8)
_py2c_reserved_types = {"True": dtypes.bool_, "False": dtypes.bool_, "None": dtypes.pointer(dtypes.typeclass(None)),
"inf": dtypes.float64, "nan": dtypes.float64}

py2c_typeconversion = {
"uint": dtypes.uint32,
"int": dtypes.typeclass(int),
"float": dtypes.typeclass(float),
"float64": dtypes.float64,
"str": dtypes.pointer(dtypes.int8)
}


Expand Down Expand Up @@ -562,7 +561,7 @@ def _Constant(self, t):
if value is True or value is False or value is None:
self.write(_py2c_nameconst[value])
else:
if isinstance(value, (Number, np.bool_)):
if isinstance(value, Number) or type(value).__name__ == 'bool_':
self._Num(t)
elif isinstance(value, tuple):
self.write("(")
Expand Down
25 changes: 19 additions & 6 deletions dace/codegen/instrumentation/data/data_report.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,6 @@
from dace import dtypes, SDFG
from dace.data import ArrayLike, Number # Type hint

import numpy as np


@dataclass
class InstrumentedDataReport:
Expand Down Expand Up @@ -76,12 +74,17 @@ def keys(self) -> Set[str]:
""" Returns the array names available in this data report. """
return self.files.keys()

def _read_array_file(self, filename: str, npdtype: np.dtype) -> Tuple[ArrayLike, ArrayLike]:
def _read_array_file(self, filename: str, npdtype) -> Tuple[ArrayLike, ArrayLike]:
"""
Reads a formatted instrumented data file.

:return: A 2-tuple of (original buffer, array view)
"""
try:
import numpy as np
except (ImportError, ModuleNotFoundError):
raise ImportError('Using data instrumentation reports requires numpy to be installed.')

with open(filename, 'rb') as fp:
# Recreate runtime shape and strides from buffer
ndims, = struct.unpack('i', fp.read(4))
Expand All @@ -95,10 +98,15 @@ def _read_array_file(self, filename: str, npdtype: np.dtype) -> Tuple[ArrayLike,
view = np.ndarray(shape, npdtype, buffer=nparr, strides=strides)
return nparr, view

def _read_symbol_file(self, filename: str, npdtype: np.dtype) -> Number:
def _read_symbol_file(self, filename: str, dtype) -> Number:
try:
import numpy as np
except (ImportError, ModuleNotFoundError):
raise ImportError('Using data instrumentation reports requires numpy to be installed.')

with open(filename, 'rb') as fp:
npclass = getattr(np, str(npdtype))
byteval = fp.read(npdtype.itemsize)
npclass = getattr(np, str(dtype))
byteval = fp.read(dtype.itemsize)
val = npclass(byteval)
return val

Expand Down Expand Up @@ -167,6 +175,11 @@ def update_report(self):

:see: dace.dtypes.DataInstrumentationType.Restore
"""
try:
import numpy as np
except (ImportError, ModuleNotFoundError):
raise ImportError('Using data instrumentation reports requires numpy to be installed.')

for (k, i), loaded in self.loaded_values.items():
if isinstance(loaded, np.ndarray):
dtype_bytes = loaded.dtype.itemsize
Expand Down
35 changes: 16 additions & 19 deletions dace/codegen/instrumentation/report.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

from dataclasses import dataclass
import json
import numpy as np
from statistics import median, mean
import re
from typing import Any, Dict, List, Optional, Tuple, Union
from io import StringIO
Expand Down Expand Up @@ -237,10 +237,10 @@ def _get_runtimes_string(self,

string += row_format.format(indent + label + ':', '', '', '', '', width=colw)
string += row_format.format(indent,
'%.3f' % np.min(runtimes),
'%.3f' % np.mean(runtimes),
'%.3f' % np.median(runtimes),
'%.3f' % np.max(runtimes),
'%.3f' % min(runtimes),
'%.3f' % mean(runtimes),
'%.3f' % median(runtimes),
'%.3f' % max(runtimes),
width=colw)

return string, sdfg, state
Expand Down Expand Up @@ -295,10 +295,10 @@ def _get_counters_string(self,

string += row_format.format(indent + "|" + label + ':', '', '', '', '', width=colw)
string += row_format.format(indent,
np.min(values),
'%.2f' % np.mean(values),
'%.2f' % np.median(values),
np.max(values),
min(values),
'%.2f' % mean(values),
'%.2f' % median(values),
max(values),
width=colw)

return string, sdfg, state
Expand All @@ -310,15 +310,14 @@ def getkey(self, element):
runtimes = events[event]
result.extend(runtimes)

result = np.array(result)
if self._sortcat == 'min':
return np.min(result)
return min(result)
elif self._sortcat == 'max':
return np.max(result)
return max(result)
elif self._sortcat == 'mean':
return np.mean(result)
return mean(result)
else: # if self._sortcat == 'median':
return np.median(result)
return median(result)

def __str__(self):
COLW = 15
Expand Down Expand Up @@ -407,10 +406,9 @@ def as_csv(self) -> Tuple[str, str]:
for name, times in events.items():
for tid, runtimes in times.items():
sdfg, state, node = element
nptimes = np.array(runtimes)
cnt = len(runtimes)
mint, meant, mediant, maxt = np.min(nptimes), np.mean(nptimes), np.median(nptimes), np.max(
nptimes)
mint, meant, mediant, maxt = min(runtimes), mean(runtimes), median(runtimes), max(
runtimes)
durations_csv.write(f'{name},{sdfg},{state},{node},{tid},{cnt},{mint},{meant},{mediant},{maxt}\n')

# Create counters CSV
Expand All @@ -422,9 +420,8 @@ def as_csv(self) -> Tuple[str, str]:
for ctrname, ctrvalues in counters.items():
for tid, values in ctrvalues.items():
sdfg, state, node = element
npval = np.array(values)
cnt = len(values)
mint, meant, mediant, maxt = np.min(npval), np.mean(npval), np.median(npval), np.max(npval)
mint, meant, mediant, maxt = min(values), mean(values), median(values), max(values)
counters_csv.write(
f'{ctrname},{name},{sdfg},{state},{node},{tid},{cnt},{mint},{meant},{mediant},{maxt}\n')

Expand Down
5 changes: 2 additions & 3 deletions dace/codegen/targets/fpga.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
import itertools
import re
import warnings
import numpy as np
from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Union
import copy

Expand Down Expand Up @@ -2111,9 +2110,9 @@ def _generate_MapEntry(self, sdfg: SDFG, cfg: ControlFlowRegion, dfg: StateSubgr
else:
end_type = None
if end_type is not None:
if np.dtype(end_type.dtype.type) > np.dtype('uint32'):
if end_type.bytes > 4: # type is wider than uint32
loop_var_type = end_type.ctype
elif np.issubdtype(np.dtype(end_type.dtype.type), np.unsignedinteger):
elif type(end_type).__name__.startswith('u'): # Unsigned types
loop_var_type = "size_t"
except (UnboundLocalError):
raise UnboundLocalError('Pipeline scopes require '
Expand Down
20 changes: 14 additions & 6 deletions dace/codegen/targets/framecode.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
from typing import Any, DefaultDict, Dict, List, Optional, Set, Tuple, Union

import networkx as nx
import numpy as np

import dace
from dace import config, data, dtypes
Expand Down Expand Up @@ -116,11 +115,20 @@ def generate_constants(self, sdfg: SDFG, callsite_stream: CodeIOStream):
for cstname, (csttype, cstval) in sdfg.constants_prop.items():
if isinstance(csttype, data.Array):
const_str = "constexpr " + csttype.dtype.ctype + " " + cstname + "[" + str(cstval.size) + "] = {"
it = np.nditer(cstval, order='C')
for i in range(cstval.size - 1):
const_str += str(it[0]) + ", "
it.iternext()
const_str += str(it[0]) + "};\n"
try:
# If the constant is a multidimensional numpy array
import numpy as np
it = np.nditer(cstval, order='C')
for i in range(cstval.size - 1):
const_str += str(it[0]) + ", "
it.iternext()
const_str += str(it[0]) + "};\n"
except (ImportError, ModuleNotFoundError):
it = iter(cstval)
for v in it:
const_str += str(v) + ", "
const_str = const_str[:-2] + "};\n"

callsite_stream.write(const_str, sdfg)
else:
callsite_stream.write("constexpr %s %s = %s;\n" % (csttype.dtype.ctype, cstname, sym2cpp(cstval)), sdfg)
Expand Down
19 changes: 13 additions & 6 deletions dace/codegen/targets/intel_fpga.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
import copy
import itertools
from six import StringIO
import numpy as np

import dace
from dace import registry, dtypes, symbolic
Expand Down Expand Up @@ -1268,11 +1267,19 @@ def generate_constants(self, sdfg, callsite_stream):
# First time, define it
self.generated_constants.add(cstname)
const_str += " = {"
it = np.nditer(cstval, order='C')
for i in range(cstval.size - 1):
const_str += str(it[0]) + ", "
it.iternext()
const_str += str(it[0]) + "};\n"
try:
# If the constant is a multidimensional numpy array
import numpy as np
it = np.nditer(cstval, order='C')
for i in range(cstval.size - 1):
const_str += str(it[0]) + ", "
it.iternext()
const_str += str(it[0]) + "};\n"
except (ImportError, ModuleNotFoundError):
it = iter(cstval)
for v in it:
const_str += str(v) + ", "
const_str = const_str[:-2] + "};\n"
else:
# only define
const_str = "extern " + const_str + ";\n"
Expand Down
3 changes: 1 addition & 2 deletions dace/codegen/targets/snitch.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
from typing import Union
import dace
import itertools
import numpy as np
import sympy as sp

from dace.memlet import Memlet
Expand Down Expand Up @@ -864,7 +863,7 @@ def match_exp(expr, param, ignore):
continue

# ignore non double dtype
if not desc.dtype.as_numpy_dtype() == np.float64:
if desc.dtype != dtypes.float64:
continue

# get access type: read and write
Expand Down
Loading
Loading