Model revision not specified, use revision: v1.0.0
ValueError: DefaultTrainer: SequenceLabelingModel: TransformerEmbedder: Due to a serious vulnerability issue in `torch.load`, even with `weights_only=True`, we now require users to upgrade torch to at least v2.6 in order to use the function. This version restriction does not apply when loading files with safetensors. See the vulnerability report here https://nvd.nist.gov/vuln/detail/CVE-2025-32434
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
File /usr/local/lib/python3.11/site-packages/modelscope/utils/registry.py:211, in build_from_cfg(cfg, registry, group_key, default_args)
    210 else:
--> 211     return obj_cls(**args)
    212 except Exception as e:
    213     # Normal TypeError does not print class name.

File /usr/local/lib/python3.11/site-packages/adaseq/modules/embedders/transformer_embedder.py:84, in TransformerEmbedder.__init__(self, model_name_or_path, drop_special_tokens, sub_module, train_parameters, eval_mode, load_weights, scalar_mix, gradient_checkpointing, transformer_kwargs, sub_token_mode)
     82 self.sub_token_mode = sub_token_mode
---> 84 self.transformer_model, self.from_hf = get_transformer(
     85     model_name_or_path,
     86     load_weights=load_weights,
     87     **(transformer_kwargs or {}),
     88 )
     90 if self.from_hf:

File /usr/local/lib/python3.11/site-packages/adaseq/modules/embedders/transformer_embedder.py:363, in get_transformer(model_name_or_path, load_weights, source, **kwargs)
    362 try:
--> 363     return get_ms_transformer(model_name_or_path, **kwargs), False
    364 except HTTPError as e:

File /usr/local/lib/python3.11/site-packages/adaseq/modules/embedders/transformer_embedder.py:414, in get_ms_transformer(model_name_or_path, **kwargs)
    413 try:
--> 414     transformer = MsModel.from_pretrained(model_name_or_path, task='backbone', **kwargs)
    415 except KeyError:

File /usr/local/lib/python3.11/site-packages/modelscope/models/base/base_model.py:178, in Model.from_pretrained(cls, model_name_or_path, revision, cfg_dict, device, trust_remote_code, **kwargs)
    177 if use_hf in {True, None}:
--> 178     model = try_to_load_hf_model(local_model_dir, task_name, use_hf,
    179                                  **kwargs)
    180 if model is not None:

File /usr/local/lib/python3.11/site-packages/modelscope/utils/automodel_utils.py:125, in try_to_load_hf_model(model_dir, task_name, use_hf, **kwargs)
    123 if automodel_class is not None:
    124     # use hf
--> 125     model = automodel_class.from_pretrained(model_dir, **kwargs)
    126 return model

File /usr/local/lib/python3.11/site-packages/modelscope/utils/hf_util/patcher.py:281, in _patch_pretrained_class.<locals>.get_wrapped_class.<locals>.ClassWrapper.from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
    278 model_dir = get_model_dir(pretrained_model_name_or_path,
    279                           **kwargs)
--> 281 module_obj = module_class.from_pretrained(
    282     model_dir, *model_args, **kwargs)
    284 if module_class.__name__.startswith('AutoModel'):

File /usr/local/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:600, in _BaseAutoModelClass.from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
    599 config = config.get_text_config()
--> 600 return model_class.from_pretrained(
    601     pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
    602 )
    603 raise ValueError(
    604     f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
    605     f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping)}."
    606 )

File /usr/local/lib/python3.11/site-packages/transformers/modeling_utils.py:317, in restore_default_torch_dtype.<locals>._wrapper(*args, **kwargs)
    316 try:
--> 317     return func(*args, **kwargs)
    318 finally:

File /usr/local/lib/python3.11/site-packages/transformers/modeling_utils.py:5069, in PreTrainedModel.from_pretrained(cls, pretrained_model_name_or_path, config, cache_dir, ignore_mismatched_sizes, force_download, local_files_only, token, revision, use_safetensors, weights_only, *model_args, **kwargs)
   5060     torch.set_default_dtype(dtype_orig)
   5062 (
   5063     model,
   5064     missing_keys,
   5065     unexpected_keys,
   5066     mismatched_keys,
   5067     offload_index,
   5068     error_msgs,
-> 5069 ) = cls._load_pretrained_model(
   5070     model,
   5071     state_dict,
   5072     checkpoint_files,
   5073     pretrained_model_name_or_path,
   5074     ignore_mismatched_sizes=ignore_mismatched_sizes,
   5075     sharded_metadata=sharded_metadata,
   5076     device_map=device_map,
   5077     disk_offload_folder=offload_folder,
   5078     offload_state_dict=offload_state_dict,
   5079     dtype=torch_dtype,
   5080     hf_quantizer=hf_quantizer,
   5081     keep_in_fp32_regex=keep_in_fp32_regex,
   5082     device_mesh=device_mesh,
   5083     key_mapping=key_mapping,
   5084     weights_only=weights_only,
   5085 )
   5086 # make sure token embedding weights are still tied if needed

File /usr/local/lib/python3.11/site-packages/transformers/modeling_utils.py:5335, in PreTrainedModel._load_pretrained_model(cls, model, state_dict, checkpoint_files, pretrained_model_name_or_path, ignore_mismatched_sizes, sharded_metadata, device_map, disk_offload_folder, offload_state_dict, dtype, hf_quantizer, keep_in_fp32_regex, device_mesh, key_mapping, weights_only)
   5333 else:
   5334     original_checkpoint_keys = list(
-> 5335         load_state_dict(checkpoint_files[0], map_location="meta", weights_only=weights_only).keys()
   5336     )
   5338 # Check if we are in a special state, i.e. loading from a state dict coming from a different architecture

File /usr/local/lib/python3.11/site-packages/transformers/modeling_utils.py:562, in load_state_dict(checkpoint_file, is_quantized, map_location, weights_only)
    561 if weights_only:
--> 562     check_torch_load_is_safe()
    563 try:

File /usr/local/lib/python3.11/site-packages/transformers/utils/import_utils.py:1622, in check_torch_load_is_safe()
   1621 if not is_torch_greater_or_equal("2.6"):
-> 1622     raise ValueError(
   1623         "Due to a serious vulnerability issue in `torch.load`, even with `weights_only=True`, we now require users "
   1624         "to upgrade torch to at least v2.6 in order to use the function. This version restriction does not apply "
   1625         "when loading files with safetensors."
   1626         "\nSee the vulnerability report here https://nvd.nist.gov/vuln/detail/CVE-2025-32434"
   1627     )

ValueError: Due to a serious vulnerability issue in `torch.load`, even with `weights_only=True`, we now require users to upgrade torch to at least v2.6 in order to use the function. This version restriction does not apply when loading files with safetensors. See the vulnerability report here https://nvd.nist.gov/vuln/detail/CVE-2025-32434
The above exception was the direct cause of the following exception:
ValueError                                Traceback (most recent call last)
File /usr/local/lib/python3.11/site-packages/modelscope/utils/registry.py:209, in build_from_cfg(cfg, registry, group_key, default_args)
    208 if hasattr(obj_cls, '_instantiate'):
--> 209     return obj_cls._instantiate(**args)
    210 else:

File /usr/local/lib/python3.11/site-packages/modelscope/models/base/base_model.py:85, in Model._instantiate(cls, **kwargs)
     80 """ Define the instantiation method of a model,default method is by
     81 calling the constructor. Note that in the case of no loading model
     82 process in constructor of a task model, a load_model method is
     83 added, and thus this method is overloaded
     84 """
---> 85 return cls(**kwargs)

File /usr/local/lib/python3.11/site-packages/adaseq/models/base.py:40, in Model.__init_subclass__.<locals>.new_init(self, init, *args, **kwargs)
     39 def new_init(self, init=cls.__init__, *args, **kwargs):
---> 40     init(self, *args, **kwargs)
     41     self.post_init()

File /usr/local/lib/python3.11/site-packages/adaseq/models/sequence_labeling_model.py:64, in SequenceLabelingModel.__init__(self, id_to_label, embedder, encoder, dropout, word_dropout, use_crf, multiview, temperature, mv_loss_type, mv_interpolation, partial, chunk, **kwargs)
     63 else:
---> 64     self.embedder = Embedder.from_config(embedder)
     65 hidden_size = self.embedder.get_output_dim()

File /usr/local/lib/python3.11/site-packages/adaseq/modules/embedders/base.py:64, in Embedder.from_config(cls, cfg_dict_or_path, **kwargs)
     63 if cfg['type'] is not None and cfg['type'] in EMBEDDERS.modules['default']:
---> 64     return build_embedder(cfg, default_args=kwargs)
     65 else:

File /usr/local/lib/python3.11/site-packages/adaseq/modules/embedders/base.py:24, in build_embedder(cfg, default_args)
     15 """Build embedder from config dict
     16
     17 Args:
    (...)
     22     embedder (:obj:`Embedder`): an embedder instance
     23 """
---> 24 return build_from_cfg(cfg, EMBEDDERS, group_key='default', default_args=default_args)

File /usr/local/lib/python3.11/site-packages/modelscope/utils/registry.py:214, in build_from_cfg(cfg, registry, group_key, default_args)
    212 except Exception as e:
    213     # Normal TypeError does not print class name.
--> 214     raise type(e)(f'{obj_cls.__name__}: {e}') from e

ValueError: TransformerEmbedder: Due to a serious vulnerability issue in `torch.load`, even with `weights_only=True`, we now require users to upgrade torch to at least v2.6 in order to use the function. This version restriction does not apply when loading files with safetensors. See the vulnerability report here https://nvd.nist.gov/vuln/detail/CVE-2025-32434
The above exception was the direct cause of the following exception:
ValueError                                Traceback (most recent call last)
File /usr/local/lib/python3.11/site-packages/modelscope/utils/registry.py:211, in build_from_cfg(cfg, registry, group_key, default_args)
    210 else:
--> 211     return obj_cls(**args)
    212 except Exception as e:
    213     # Normal TypeError does not print class name.

File /usr/local/lib/python3.11/site-packages/adaseq/training/default_trainer.py:67, in DefaultTrainer.__init__(self, cfg_file, work_dir, dataset_manager, data_collator, preprocessor, seed, device, **kwargs)
     56 def __init__(
     57     self,
     58     cfg_file: str,
    (...)
     65     **kwargs,
     66 ) -> None:
---> 67     super().__init__(
     68         model=None,
     69         cfg_file=cfg_file,
     70         cfg_modify_fn=None,
     71         data_collator=data_collator,
     72         train_dataset=dataset_manager.train,
     73         eval_dataset=dataset_manager.valid,
     74         preprocessor=preprocessor,
     75         work_dir=work_dir,
     76         seed=seed,
     77         device=device,
     78         **kwargs,
     79     )
     81     # Setup testset if there is one

File /usr/local/lib/python3.11/site-packages/modelscope/trainers/trainer.py:175, in EpochBasedTrainer.__init__(self, model, cfg_file, cfg_modify_fn, arg_parse_fn, data_collator, train_dataset, eval_dataset, preprocessor, optimizers, model_revision, seed, callbacks, samplers, efficient_tuners, **kwargs)
    174 else:
--> 175     self.model = self.build_model()
    177 if self._compile:
    178     # Compile the model with torch 2.0

File /usr/local/lib/python3.11/site-packages/adaseq/training/default_trainer.py:102, in DefaultTrainer.build_model(self)
     99 """
    100 Override this func to build adaseq `Model`.
    101 """
--> 102 return Model.from_config(self.cfg)

File /usr/local/lib/python3.11/site-packages/adaseq/models/base.py:118, in Model.from_config(cls, cfg_dict_or_path, **kwargs)
    114     raise ValueError(
    115         'Please pass a correct cfg dict, which should be a reachable file or a dict.'
    116     )
--> 118 model = build_model(model_config, task_name=task, default_args=kwargs)
    119 cfg['framework'] = 'pytorch'

File /usr/local/lib/python3.11/site-packages/modelscope/models/builder.py:35, in build_model(cfg, task_name, default_args)
     34 try:
---> 35     model = build_from_cfg(
     36         cfg, MODELS, group_key=task_name, default_args=default_args)
     37 except KeyError as e:
     38     # Handle subtask with a backbone model that hasn't been registered
     39     # All the subtask with a parent task should have a task model, otherwise it is not a
     40     # valid subtask

File /usr/local/lib/python3.11/site-packages/modelscope/utils/registry.py:214, in build_from_cfg(cfg, registry, group_key, default_args)
    212 except Exception as e:
    213     # Normal TypeError does not print class name.
--> 214     raise type(e)(f'{obj_cls.__name__}: {e}') from e

ValueError: SequenceLabelingModel: TransformerEmbedder: Due to a serious vulnerability issue in `torch.load`, even with `weights_only=True`, we now require users to upgrade torch to at least v2.6 in order to use the function. This version restriction does not apply when loading files with safetensors. See the vulnerability report here https://nvd.nist.gov/vuln/detail/CVE-2025-32434
The above exception was the direct cause of the following exception:
ValueError                                Traceback (most recent call last)
Cell In[3], line 8
      5 work_dir = 'experiments/transformer_crf'
      6 os.makedirs(work_dir, exist_ok=True)
----> 8 trainer = build_trainer_from_partial_objects(
      9     config,
     10     work_dir=work_dir,
     11     seed=42,
     12     device='cuda:0'
     13 )
     15 # do training
     16 trainer.train()

File /usr/local/lib/python3.11/site-packages/adaseq/commands/train.py:218, in build_trainer_from_partial_objects(config, work_dir, **kwargs)
    215     collator_config = dict(type=collator_config)
    216 data_collator = build_data_collator(preprocessor.tokenizer, collator_config)
--> 218 trainer = build_trainer(
    219     config.safe_get('train.trainer', Trainers.default_trainer),
    220     cfg_file=new_config_path,
    221     work_dir=work_dir,
    222     dataset_manager=dm,
    223     data_collator=data_collator,
    224     preprocessor=preprocessor,
    225     **kwargs,
    226 )
    227 return trainer

File /usr/local/lib/python3.11/site-packages/adaseq/training/default_trainer.py:176, in build_trainer(name, **kwargs)
    173 if 'WORLD_SIZE' in os.environ and int(os.environ['WORLD_SIZE']) > 1:
    174     kwargs.update(launcher='pytorch', device='gpu')
--> 176 trainer = ms_build_trainer(name, kwargs)
    177 return trainer

File /usr/local/lib/python3.11/site-packages/modelscope/trainers/builder.py:39, in build_trainer(name, default_args)
     36 register_plugins_repo(configuration.safe_get('plugins'))
     37 register_modelhub_repo(model_dir,
     38                        configuration.get('allow_remote', False))
---> 39 return build_from_cfg(cfg, TRAINERS, default_args=default_args)

File /usr/local/lib/python3.11/site-packages/modelscope/utils/registry.py:214, in build_from_cfg(cfg, registry, group_key, default_args)
    211     return obj_cls(**args)
    212 except Exception as e:
    213     # Normal TypeError does not print class name.
--> 214     raise type(e)(f'{obj_cls.__name__}: {e}') from e

ValueError: DefaultTrainer: SequenceLabelingModel: TransformerEmbedder: Due to a serious vulnerability issue in `torch.load`, even with `weights_only=True`, we now require users to upgrade torch to at least v2.6 in order to use the function. This version restriction does not apply when loading files with safetensors. See the vulnerability report here https://nvd.nist.gov/vuln/detail/CVE-2025-32434
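
The root cause is the gate in transformers' check_torch_load_is_safe shown above: the installed torch is older than 2.6 and the backbone checkpoint is being read through torch.load rather than safetensors, so loading is refused because of CVE-2025-32434. Below is a minimal sketch of the two obvious workarounds; the file names pytorch_model.bin and model.safetensors are illustrative assumptions, since the traceback does not show which checkpoint file was being read.

import torch
from safetensors.torch import save_file

# Option 1: upgrade torch so check_torch_load_is_safe() passes, e.g.
#   pip install -U "torch>=2.6"
print(torch.__version__)

# Option 2: convert the already-downloaded (trusted) checkpoint to safetensors
# once, so transformers never calls torch.load for it. The file names below
# are assumptions, not taken from the traceback.
state_dict = torch.load("pytorch_model.bin", map_location="cpu", weights_only=True)
# save_file() requires contiguous tensors and rejects tensors sharing storage
state_dict = {k: v.contiguous() for k, v in state_dict.items()}
save_file(state_dict, "model.safetensors")

Either route should let MsModel.from_pretrained get past load_state_dict; upgrading torch is the simpler of the two.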