diff --git a/doc/running.rst b/doc/running.rst
index 477dbe156..f13c0e7c6 100644
--- a/doc/running.rst
+++ b/doc/running.rst
@@ -210,14 +210,16 @@ After generating and building the model code, a ``receptor_type`` entry is avail
     neuron = nest.Create("iaf_psc_exp_multisynapse_neuron_nestml")
+    receptor_types = nest.GetStatus(neuron, "receptor_types")[0]
+
     sg = nest.Create("spike_generator", params={"spike_times": [20., 80.]})
-    nest.Connect(sg, neuron, syn_spec={"receptor_type" : 1, "weight": 1000.})
+    nest.Connect(sg, neuron, syn_spec={"receptor_type" : receptor_types["SPIKES_1"], "weight": 1000.})
     sg2 = nest.Create("spike_generator", params={"spike_times": [40., 60.]})
-    nest.Connect(sg2, neuron, syn_spec={"receptor_type" : 2, "weight": 1000.})
+    nest.Connect(sg2, neuron, syn_spec={"receptor_type" : receptor_types["SPIKES_2"], "weight": 1000.})
     sg3 = nest.Create("spike_generator", params={"spike_times": [30., 70.]})
-    nest.Connect(sg3, neuron, syn_spec={"receptor_type" : 3, "weight": 500.})
+    nest.Connect(sg3, neuron, syn_spec={"receptor_type" : receptor_types["SPIKES_3"], "weight": 500.})
 
 Note that in multisynapse neurons, receptor ports are numbered starting from 1.
@@ -225,9 +227,9 @@ We furthermore wish to record the synaptic currents ``I_kernel1``, ``I_kernel2``
 .. code-block:: python
 
-    mm = nest.Create('multimeter', params={'record_from': ['I_kernel1__X__spikes1',
-                                                           'I_kernel2__X__spikes2',
-                                                           'I_kernel3__X__spikes3'],
+    mm = nest.Create('multimeter', params={'record_from': ['I_kernel1__X__spikes_1',
+                                                           'I_kernel2__X__spikes_2',
+                                                           'I_kernel3__X__spikes_3'],
                                            'interval': .1})
     nest.Connect(mm, neuron)
diff --git a/models/neurons/hh_moto_5ht.nestml b/models/neurons/hh_moto_5ht.nestml
index 311482eca..767a90d75 100644
--- a/models/neurons/hh_moto_5ht.nestml
+++ b/models/neurons/hh_moto_5ht.nestml
@@ -49,7 +49,7 @@ model hh_moto_5ht_neuron:
         inline I_syn_exc pA = convolve(I_syn_ex, exc_spikes)
         inline I_syn_inh pA = convolve(I_syn_in, inh_spikes)
-        inline E_Ca mV = ((1000.0 * R_const * T_current) / (2. * F_const)) * log10(Ca_out / Ca_in)
+        inline E_Ca mV = ((1000.0 * R_const * T_current) / (2. * F_const)) * log10(Ca_out / Ca_in) * mV
         inline I_Na pA = g_Na * Act_m * Act_m * Act_m * Act_h * ( V_m - E_Na )
         inline I_K pA = g_K_rect * Inact_n * Inact_n * Inact_n * Inact_n * ( V_m - E_K )
diff --git a/models/neurons/hill_tononi.nestml b/models/neurons/hill_tononi.nestml
index 0f9f8b54c..3dedda367 100644
--- a/models/neurons/hill_tononi.nestml
+++ b/models/neurons/hill_tononi.nestml
@@ -79,11 +79,11 @@ model hill_tononi_neuron:
         recordable inline I_KNa pA = -KNa_g_peak * m_inf_KNa * ( V_m - KNa_E_rev )
         # Low-thresh Ca current; member only to allow recording
-        recordable inline I_T pA = -T_g_peak * IT_m * IT_m * IT_h * ( V_m - T_E_rev )
+        recordable inline I_T pA = -T_g_peak / nS * IT_m / nS * IT_m / nS * IT_h * ( V_m - T_E_rev )
-        recordable inline I_h pA = -h_g_peak * Ih_m * ( V_m - h_E_rev )
+        recordable inline I_h pA = -h_g_peak / nS * Ih_m * ( V_m - h_E_rev )
         # The spike current is only activate immediately after a spike.
-        inline I_spike mV = (g_spike) ? -( V_m - E_K ) / Tau_spike : 0
+        inline I_spike mV = (g_spike) ? -( V_m - E_K ) / Tau_spike * ms : 0 mV
         V_m' = ( ( I_Na + I_K + I_syn + I_NaP + I_KNa + I_T + I_h + I_e + I_stim ) / Tau_m + I_spike * pA/(ms * mV) ) * s/nF
         #############
@@ -96,8 +96,8 @@ model hill_tononi_neuron:
         # I_KNa
         inline D_influx_peak real = 0.025
         inline tau_D real = 1250.0 # yes, 1.25 s
-        inline D_thresh mV = -10.0
-        inline D_slope mV = 5.0
+        inline D_thresh mV = -10.0 mV
+        inline D_slope mV = 5.0 mV
         inline D_influx real = 1.0 / ( 1.0 + exp( -( V_m - D_thresh ) / D_slope ) )
         Theta' = -( Theta - Theta_eq ) / Tau_theta
diff --git a/models/neurons/terub_stn.nestml b/models/neurons/terub_stn.nestml
index 1b1b2b262..66f94001e 100644
--- a/models/neurons/terub_stn.nestml
+++ b/models/neurons/terub_stn.nestml
@@ -81,8 +81,8 @@ model terub_stn_neuron:
         inline k_Ca real = 22.5
         inline k1 real = 15.0
-        inline I_exc_mod pA = -convolve(g_exc, exc_spikes) * V_m
-        inline I_inh_mod pA = convolve(g_inh, inh_spikes) * (V_m - E_gs)
+        inline I_exc_mod pA = -convolve(g_exc, exc_spikes) * V_m / mV
+        inline I_inh_mod pA = convolve(g_inh, inh_spikes) * (V_m - E_gs) / mV
         inline tau_n ms = tau_n_0 + tau_n_1 / (1. + exp(-(V_m-theta_n_tau)/sigma_n_tau))
         inline tau_h ms = tau_h_0 + tau_h_1 / (1. + exp(-(V_m-theta_h_tau)/sigma_h_tau))
diff --git a/pynestml/cocos/co_co_illegal_expression.py b/pynestml/cocos/co_co_illegal_expression.py
index 0b1fe66bf..b78396e3b 100644
--- a/pynestml/cocos/co_co_illegal_expression.py
+++ b/pynestml/cocos/co_co_illegal_expression.py
@@ -18,6 +18,7 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with NEST. If not, see <http://www.gnu.org/licenses/>.
+from pynestml.meta_model.ast_inline_expression import ASTInlineExpression
 from pynestml.utils.ast_source_location import ASTSourceLocation
 
 from pynestml.meta_model.ast_declaration import ASTDeclaration
@@ -71,6 +72,19 @@ def visit_declaration(self, node):
             TypeCaster.try_to_recover_or_error(lhs_type, rhs_type, node.get_expression())
         return
 
+    def visit_inline_expression(self, node):
+        """
+        Visits a single inline expression and asserts that type of lhs is equal to type of rhs.
+        """
+        assert isinstance(node, ASTInlineExpression)
+        lhs_type = node.get_data_type().get_type_symbol()
+        rhs_type = node.get_expression().type
+        if isinstance(rhs_type, ErrorTypeSymbol):
+            LoggingHelper.drop_missing_type_error(node)
+            return
+        if self.__types_do_not_match(lhs_type, rhs_type):
+            TypeCaster.try_to_recover_or_error(lhs_type, rhs_type, node.get_expression())
+
     def visit_assignment(self, node):
         """
         Visits a single expression and assures that type(lhs) == type(rhs).
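For quick reference, the PyNEST usage pattern introduced in the doc/running.rst hunk above can be exercised end to end roughly as follows. This is a minimal sketch, not part of the patch: the module name passed to nest.Install() and the simulation duration are illustrative, everything else is taken from the documentation example.

import nest

nest.Install("nestmlmodule")  # illustrative module name; use the module built for your model

neuron = nest.Create("iaf_psc_exp_multisynapse_neuron_nestml")
# Look up receptor ports by name instead of hard-coding the indices 1, 2, 3.
receptor_types = nest.GetStatus(neuron, "receptor_types")[0]

sg = nest.Create("spike_generator", params={"spike_times": [20., 80.]})
nest.Connect(sg, neuron, syn_spec={"receptor_type": receptor_types["SPIKES_1"], "weight": 1000.})

# The convolution state variables now carry an underscore before the vector index.
mm = nest.Create("multimeter", params={"record_from": ["I_kernel1__X__spikes_1"], "interval": 0.1})
nest.Connect(mm, neuron)

nest.Simulate(100.)  # illustrative duration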
@@ -231,8 +245,7 @@ def visit_for_stmt(self, node):
             Logger.log_message(code=code, message=message, error_position=node.get_start_from().get_source_position(),
                                log_level=LoggingLevel.ERROR)
         elif not (from_type.equals(PredefinedTypes.get_integer_type())
-                  or from_type.equals(
-            PredefinedTypes.get_real_type())):
+                  or from_type.equals(PredefinedTypes.get_real_type())):
             code, message = Messages.get_type_different_from_expected(PredefinedTypes.get_integer_type(), from_type)
             Logger.log_message(code=code, message=message, error_position=node.get_start_from().get_source_position(),
diff --git a/pynestml/codegeneration/printers/nestml_printer.py b/pynestml/codegeneration/printers/nestml_printer.py
index b8ee28aef..b1d7e3aa9 100644
--- a/pynestml/codegeneration/printers/nestml_printer.py
+++ b/pynestml/codegeneration/printers/nestml_printer.py
@@ -246,7 +246,7 @@ def print_declaration(self, node: ASTDeclaration) -> str:
                 ret += ","
         ret += " " + self.print(node.get_data_type()) + " "
         if node.has_size_parameter():
-            ret += "[" + node.get_size_parameter() + "] "
+            ret += "[" + self.print(node.get_size_parameter()) + "] "
         if node.has_expression():
             ret += "= " + self.print(node.get_expression())
         if node.has_invariant():
@@ -363,7 +363,7 @@ def print_input_port(self, node: ASTInputPort) -> str:
         if node.has_datatype():
             ret += " " + self.print(node.get_datatype()) + " "
         if node.has_size_parameter():
-            ret += "[" + node.get_size_parameter() + "]"
+            ret += "[" + self.print(node.get_size_parameter()) + "]"
         ret += "<- "
         if node.has_input_qualifiers():
             for qual in node.get_input_qualifiers():
diff --git a/pynestml/frontend/pynestml_frontend.py b/pynestml/frontend/pynestml_frontend.py
index 84e0db835..994d97e9b 100644
--- a/pynestml/frontend/pynestml_frontend.py
+++ b/pynestml/frontend/pynestml_frontend.py
@@ -153,6 +153,17 @@ def generate_target(input_path: Union[str, Sequence[str]], target_platform: str,
     codegen_opts : Optional[Mapping[str, Any]]
         A dictionary containing additional options for the target code generator.
     """
+
+    configure_front_end(input_path, target_platform, target_path, install_path, logging_level,
+                        module_name, store_log, suffix, dev, codegen_opts)
+    if not process() == 0:
+        raise Exception("Error(s) occurred while processing the model")
+
+
+def configure_front_end(input_path: Union[str, Sequence[str]], target_platform: str, target_path=None,
+                        install_path: str = None, logging_level="ERROR", module_name=None, store_log=False, suffix="",
+                        dev=False, codegen_opts: Optional[Mapping[str, Any]] = None):
+
     args = list()
     args.append(qualifier_input_path_arg)
     if type(input_path) is str:
@@ -194,9 +205,6 @@ def generate_target(input_path: Union[str, Sequence[str]], target_platform: str,
     if codegen_opts:
         FrontendConfiguration.set_codegen_opts(codegen_opts)
 
-    if not process() == 0:
-        raise Exception("Error(s) occurred while processing the model")
-
 
 def generate_nest_target(input_path: Union[str, Sequence[str]], target_path: Optional[str] = None,
                          install_path: Optional[str] = None, logging_level="ERROR",
@@ -277,16 +285,17 @@ def main() -> int:
     return int(process())
 
 
-def process():
+def get_parsed_models():
     r"""
-    The main toolchain workflow entry point. For all models: parse, validate, transform, generate code and build.
+    Handle the parsing and validation of the NESTML files
 
     Returns
     -------
+    models: Sequence[Union[ASTNeuron, ASTSynapse]]
+        List of correctly parsed models
     errors_occurred : bool
         Flag indicating whether errors occurred during processing
     """
-
     # init log dir
     create_report_dir()
@@ -304,21 +313,10 @@ def process():
         parsed_unit = ModelParser.parse_file(nestml_file)
         if parsed_unit is None:
             # Parsing error in the NESTML model, return True
-            return True
+            return [], True
 
         compilation_units.append(parsed_unit)
 
-    # initialize and set options for transformers, code generator and builder
-    codegen_and_builder_opts = FrontendConfiguration.get_codegen_opts()
-    transformers, codegen_and_builder_opts = transformers_from_target_name(FrontendConfiguration.get_target_platform(),
-                                                                           options=codegen_and_builder_opts)
-    _codeGenerator = code_generator_from_target_name(FrontendConfiguration.get_target_platform())
-    codegen_and_builder_opts = _codeGenerator.set_options(codegen_and_builder_opts)
-    _builder, codegen_and_builder_opts = builder_from_target_name(FrontendConfiguration.get_target_platform(), options=codegen_and_builder_opts)
-
-    if len(codegen_and_builder_opts) > 0:
-        raise CodeGeneratorOptionsException("The code generator option(s) \"" + ", ".join(codegen_and_builder_opts.keys()) + "\" do not exist.")
-
     if len(compilation_units) > 0:
         # generate a list of all models
         models: Sequence[ASTModel] = []
@@ -335,24 +333,59 @@ def process():
                 Logger.log_message(node=model, code=code, message=message, error_position=model.get_source_position(),
                                    log_level=LoggingLevel.WARNING)
-                return True
+                return [model], True
+
+    return models, False
+
+
+def transform_models(transformers, models):
+    for transformer in transformers:
+        models = transformer.transform(models)
+    return models
+
+
+def generate_code(code_generators, models):
+    code_generators.generate_code(models)
+
+
+def process():
+    r"""
+    The main toolchain workflow entry point. For all models: parse, validate, transform, generate code and build.
+
+    Returns
+    -------
+    errors_occurred : bool
+        Flag indicating whether errors occurred during processing
+    """
+
+    # initialize and set options for transformers, code generator and builder
+    codegen_and_builder_opts = FrontendConfiguration.get_codegen_opts()
+
+    transformers, codegen_and_builder_opts = transformers_from_target_name(FrontendConfiguration.get_target_platform(),
+                                                                           options=codegen_and_builder_opts)
+
+    code_generator = code_generator_from_target_name(FrontendConfiguration.get_target_platform())
+    codegen_and_builder_opts = code_generator.set_options(codegen_and_builder_opts)
+
+    _builder, codegen_and_builder_opts = builder_from_target_name(FrontendConfiguration.get_target_platform(), options=codegen_and_builder_opts)
+
+    if len(codegen_and_builder_opts) > 0:
+        raise CodeGeneratorOptionsException("The code generator option(s) \"" + ", ".join(codegen_and_builder_opts.keys()) + "\" do not exist.")
 
-    # run transformers
-    for transformer in transformers:
-        models = transformer.transform(models)
+    models, errors_occurred = get_parsed_models()
 
-    # perform code generation
-    _codeGenerator.generate_code(models)
+    if not errors_occurred:
+        models = transform_models(transformers, models)
+        generate_code(code_generator, models)
 
-    # perform build
-    if _builder is not None:
-        _builder.build()
+        # perform build
+        if _builder is not None:
+            _builder.build()
 
     if FrontendConfiguration.store_log:
         store_log_to_file()
 
-    # Everything is fine, return false, i.e., no errors have occurred.
-    return False
+    return errors_occurred
 
 
 def init_predefined():
diff --git a/pynestml/utils/ast_utils.py b/pynestml/utils/ast_utils.py
index 8d49cb1df..3af443cf6 100644
--- a/pynestml/utils/ast_utils.py
+++ b/pynestml/utils/ast_utils.py
@@ -1224,7 +1224,7 @@ def construct_kernel_X_spike_buf_name(cls, kernel_var_name: str, spike_input_por
         spike_input_port_name = spike_input_port.get_name()
         if spike_input_port.has_vector_parameter():
-            spike_input_port_name += str(cls.get_numeric_vector_size(spike_input_port))
+            spike_input_port_name += "_" + str(cls.get_numeric_vector_size(spike_input_port))
 
         return kernel_var_name.replace("$", "__DOLLAR") + "__X__" + spike_input_port_name + diff_order_symbol * order
@@ -1317,6 +1317,13 @@ def get_input_port_by_name(cls, input_blocks: List[ASTInputBlock], port_name: st
         """
         for input_block in input_blocks:
             for input_port in input_block.get_input_ports():
+                if input_port.has_size_parameter():
+                    size_parameter = input_port.get_size_parameter()
+                    if isinstance(size_parameter, ASTSimpleExpression):
+                        size_parameter = size_parameter.get_numeric_literal()
+                    port_name, port_index = port_name.split("_")
+                    assert int(port_index) > 0
+                    assert int(port_index) <= size_parameter
                 if input_port.name == port_name:
                     return input_port
         return None
diff --git a/tests/cocos_test.py b/tests/cocos_test.py
index ea0146d3e..8c6c21453 100644
--- a/tests/cocos_test.py
+++ b/tests/cocos_test.py
@@ -407,7 +407,7 @@ def test_invalid_convolve_correctly_defined(self):
             os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), 'invalid')),
                          'CoCoConvolveNotCorrectlyProvided.nestml'))
         self.assertEqual(len(Logger.get_all_messages_of_level_and_or_node(model.get_model_list()[0],
-                                                                          LoggingLevel.ERROR)), 2)
+                                                                          LoggingLevel.ERROR)), 3)
 
     def test_valid_convolve_correctly_defined(self):
         Logger.set_logging_level(LoggingLevel.INFO)
@@ -487,7 +487,7 @@ def test_invalid_convolve_correctly_parameterized(self):
             os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), 'invalid')),
                          'CoCoConvolveNotCorrectlyParametrized.nestml'))
         self.assertEqual(len(
-            Logger.get_all_messages_of_level_and_or_node(model.get_model_list()[0], LoggingLevel.ERROR)), 1)
+            Logger.get_all_messages_of_level_and_or_node(model.get_model_list()[0], LoggingLevel.ERROR)), 2)
 
     def test_valid_convolve_correctly_parameterized(self):
         Logger.set_logging_level(LoggingLevel.INFO)
diff --git a/tests/nest_tests/nest_multisynapse_test.py b/tests/nest_tests/nest_multisynapse_test.py
index 58e7b3663..865b6bc6e 100644
--- a/tests/nest_tests/nest_multisynapse_test.py
+++ b/tests/nest_tests/nest_multisynapse_test.py
@@ -146,7 +146,7 @@ def test_multisynapse_with_vector_input_ports(self):
         nest.Connect(sg3, neuron, syn_spec={"receptor_type": receptor_types["SPIKES_3"], "weight": 500., "delay": 0.1})
 
         mm = nest.Create("multimeter", params={"record_from": [
-            "I_kernel1__X__spikes1", "I_kernel2__X__spikes2", "I_kernel3__X__spikes3"], "interval": 0.1})
+            "I_kernel1__X__spikes_1", "I_kernel2__X__spikes_2", "I_kernel3__X__spikes_3"], "interval": 0.1})
         nest.Connect(mm, neuron)
 
         vm_1 = nest.Create("voltmeter")
diff --git a/tests/valid/CoCoConvolveNotCorrectlyProvided.nestml b/tests/valid/CoCoConvolveNotCorrectlyProvided.nestml
index f855ded21..050d87cba 100644
--- a/tests/valid/CoCoConvolveNotCorrectlyProvided.nestml
+++ b/tests/valid/CoCoConvolveNotCorrectlyProvided.nestml
@@ -34,7 +34,7 @@ along with NEST. If not, see <http://www.gnu.org/licenses/>.
 model CoCoConvolveNotCorrectlyProvided:
     equations:
         kernel test = 10
-        inline testB pA = convolve(test, spikeExc) # convolve provided with a kernel and a spike input port, thus correct
+        inline testB pA = convolve(test, spikeExc) * pA # convolve provided with a kernel and a spike input port, thus correct
 
     input:
         spikeExc integer <- excitatory spike
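The front-end refactoring above splits the old monolithic process() into configure_front_end(), get_parsed_models(), transform_models()/generate_code() and a thinner process(). A minimal sketch of how the pieces can compose after this change; the input path is a placeholder, and generate_target() keeps its original one-call behaviour:

from pynestml.frontend.pynestml_frontend import (configure_front_end,
                                                 generate_target,
                                                 get_parsed_models)

# One-shot entry point, unchanged for existing callers: configuration now
# happens in configure_front_end() before process() runs.
generate_target(input_path="models/neurons/iaf_psc_exp.nestml",  # placeholder path
                target_platform="NEST",
                suffix="_nestml")

# The split entry points can also be used directly, e.g. to inspect the parsed
# and validated models without transforming them or generating any code.
configure_front_end(input_path="models/neurons/iaf_psc_exp.nestml",  # placeholder path
                    target_platform="NEST")
models, errors_occurred = get_parsed_models()
if not errors_occurred:
    print([model.get_name() for model in models])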