Skip to content

Commit

Permalink
Merge pull request #280 from NexaAI/david/bugfix
Browse files · Browse the repository at this point in the history
update llama.cpp
  • Loading branch information
zhiyuan8 authored Nov 22, 2024
2 parents 4e1b535 + 159f2b9 commit b4861fb
Show file tree
Hide file tree
Showing 3 changed files with 2 additions and 5 deletions.
2 changes: 1 addition & 1 deletion dependency/llama.cpp
2 changes: 1 addition & 1 deletion nexa/__init__.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = "0.0.9.3"
__version__ = "0.0.9.4"
3 changes: 0 additions & 3 deletions nexa/gguf/nexa_inference_vlm_omni.py
Original file line number Diff line number Diff line change
Expand Up @@ -149,9 +149,6 @@ def run(self):

def inference(self, prompt: str, image_path: str):
with suppress_stdout_stderr():
if prompt and prompt[0].islower():
prompt = prompt[0].upper() + prompt[1:]

prompt = ctypes.c_char_p(prompt.encode("utf-8"))
image_path = ctypes.c_char_p(image_path.encode("utf-8"))
response = omni_vlm_cpp.omnivlm_inference(prompt, image_path)
Expand Down

0 comments on commit b4861fb

Please sign in to comment.