From 7c2227a1972a4add4b5c118e4914c086513d0382 Mon Sep 17 00:00:00 2001
From: Cebtenzzre
Date: Wed, 23 Aug 2023 10:29:09 -0400
Subject: [PATCH] chmod : make scripts executable (#2675)

---
 ci/run.sh                          | 0
 convert-falcon-hf-to-gguf.py       | 1 +
 convert-gptneox-hf-to-gguf.py      | 1 +
 convert-llama-7b-pth-to-gguf.py    | 1 +
 convert-llama-ggmlv3-to-gguf.py    | 1 +
 convert-llama-hf-to-gguf.py        | 1 +
 convert-lora-to-ggml.py            | 2 +-
 convert.py                         | 2 +-
 examples/embd-input/embd_input.py  | 1 +
 examples/embd-input/llava.py       | 1 +
 examples/embd-input/minigpt4.py    | 1 +
 examples/embd-input/panda_gpt.py   | 1 +
 examples/jeopardy/graph.py         | 1 +
 examples/jeopardy/jeopardy.sh      | 0
 examples/json-schema-to-grammar.py | 1 +
 examples/make-ggml.py              | 1 +
 examples/reason-act.sh             | 1 -
 examples/server-llama2-13B.sh      | 0
 examples/server/api_like_OAI.py    | 1 +
 examples/server/chat-llama2.sh     | 0
 examples/server/chat.sh            | 0
 gguf.py                            | 1 +
 scripts/get-wikitext-2.sh          | 0
 23 files changed, 16 insertions(+), 3 deletions(-)
 mode change 100644 => 100755 ci/run.sh
 mode change 100644 => 100755 convert-falcon-hf-to-gguf.py
 mode change 100644 => 100755 convert-gptneox-hf-to-gguf.py
 mode change 100644 => 100755 convert-llama-7b-pth-to-gguf.py
 mode change 100644 => 100755 convert-llama-ggmlv3-to-gguf.py
 mode change 100644 => 100755 convert-llama-hf-to-gguf.py
 mode change 100644 => 100755 convert.py
 mode change 100644 => 100755 examples/embd-input/embd_input.py
 mode change 100644 => 100755 examples/embd-input/llava.py
 mode change 100644 => 100755 examples/embd-input/minigpt4.py
 mode change 100644 => 100755 examples/embd-input/panda_gpt.py
 mode change 100644 => 100755 examples/jeopardy/graph.py
 mode change 100644 => 100755 examples/jeopardy/jeopardy.sh
 mode change 100644 => 100755 examples/json-schema-to-grammar.py
 mode change 100644 => 100755 examples/make-ggml.py
 mode change 100644 => 100755 examples/server-llama2-13B.sh
 mode change 100644 => 100755 examples/server/chat-llama2.sh
 mode change 100644 => 100755 examples/server/chat.sh
 mode change 100644 => 100755 gguf.py
 mode change 100644 => 100755 scripts/get-wikitext-2.sh

diff --git a/ci/run.sh b/ci/run.sh
old mode 100644
new mode 100755
diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py
old mode 100644
new mode 100755
index b3e190a0fd83c..50069db56213c
--- a/convert-falcon-hf-to-gguf.py
+++ b/convert-falcon-hf-to-gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # HF falcon--> gguf conversion
 
 import gguf
diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py
old mode 100644
new mode 100755
index a7cefc6f35e04..6eeff5bb1eb58
--- a/convert-gptneox-hf-to-gguf.py
+++ b/convert-gptneox-hf-to-gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # HF gptneox--> gguf conversion
 
 import gguf
diff --git a/convert-llama-7b-pth-to-gguf.py b/convert-llama-7b-pth-to-gguf.py
old mode 100644
new mode 100755
index ab5c80b69ede2..f103f5f61df23
--- a/convert-llama-7b-pth-to-gguf.py
+++ b/convert-llama-7b-pth-to-gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # 7b pth llama --> gguf conversion
 # Only models with a single datafile are supported, like 7B
 # HF files required in the model dir: config.json tokenizer_config.json tokenizer.json tokenizer.model
diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py
old mode 100644
new mode 100755
index 86d4596804d61..3bf93627d225c
--- a/convert-llama-ggmlv3-to-gguf.py
+++ b/convert-llama-ggmlv3-to-gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import sys, struct, math, argparse
 from pathlib import Path
 
diff --git a/convert-llama-hf-to-gguf.py b/convert-llama-hf-to-gguf.py
old mode 100644
new mode 100755
index f8cfdaa800c85..08fde238b5c3a
--- a/convert-llama-hf-to-gguf.py
+++ b/convert-llama-hf-to-gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 # HF llama --> gguf conversion
 
 import gguf
diff --git a/convert-lora-to-ggml.py b/convert-lora-to-ggml.py
index b4999ff5a07c8..04a7b8bbf4eaf 100755
--- a/convert-lora-to-ggml.py
+++ b/convert-lora-to-ggml.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 import json
 import os
 import re
diff --git a/convert.py b/convert.py
old mode 100644
new mode 100755
index e720889fd515a..a701ab41b436a
--- a/convert.py
+++ b/convert.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 import gguf
 import argparse
diff --git a/examples/embd-input/embd_input.py b/examples/embd-input/embd_input.py
old mode 100644
new mode 100755
index be2896614e9b3..f146acdc19de7
--- a/examples/embd-input/embd_input.py
+++ b/examples/embd-input/embd_input.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import ctypes
 from ctypes import cdll, c_char_p, c_void_p, POINTER, c_float, c_int
 import numpy as np
diff --git a/examples/embd-input/llava.py b/examples/embd-input/llava.py
old mode 100644
new mode 100755
index bcbdd2bedfd1a..06fad55f4980e
--- a/examples/embd-input/llava.py
+++ b/examples/embd-input/llava.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import sys
 import os
 sys.path.insert(0, os.path.dirname(__file__))
diff --git a/examples/embd-input/minigpt4.py b/examples/embd-input/minigpt4.py
old mode 100644
new mode 100755
index 15c9b77c0d37c..7b13e4a5cc4f8
--- a/examples/embd-input/minigpt4.py
+++ b/examples/embd-input/minigpt4.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import sys
 import os
 sys.path.insert(0, os.path.dirname(__file__))
diff --git a/examples/embd-input/panda_gpt.py b/examples/embd-input/panda_gpt.py
old mode 100644
new mode 100755
index 0cfac5f32adf2..891ad7cc9ffbd
--- a/examples/embd-input/panda_gpt.py
+++ b/examples/embd-input/panda_gpt.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import sys
 import os
 sys.path.insert(0, os.path.dirname(__file__))
diff --git a/examples/jeopardy/graph.py b/examples/jeopardy/graph.py
old mode 100644
new mode 100755
index 1b6c54bff73d1..8bc0706b86d05
--- a/examples/jeopardy/graph.py
+++ b/examples/jeopardy/graph.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import matplotlib.pyplot as plt
 import os
 import csv
diff --git a/examples/jeopardy/jeopardy.sh b/examples/jeopardy/jeopardy.sh
old mode 100644
new mode 100755
diff --git a/examples/json-schema-to-grammar.py b/examples/json-schema-to-grammar.py
old mode 100644
new mode 100755
index 2dccc118a70e8..2a4cb65bcfc7e
--- a/examples/json-schema-to-grammar.py
+++ b/examples/json-schema-to-grammar.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import argparse
 import json
 import re
diff --git a/examples/make-ggml.py b/examples/make-ggml.py
old mode 100644
new mode 100755
index f63d9fc22fb3f..6a34eeac53faa
--- a/examples/make-ggml.py
+++ b/examples/make-ggml.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 """
 This script converts Hugging Face llama models to GGML and quantizes them.
 
diff --git a/examples/reason-act.sh b/examples/reason-act.sh
index e7fe655dbcea3..046c48db584bc 100755
--- a/examples/reason-act.sh
+++ b/examples/reason-act.sh
@@ -1,4 +1,3 @@
-
 #!/bin/bash
 
 cd `dirname $0`
diff --git a/examples/server-llama2-13B.sh b/examples/server-llama2-13B.sh
old mode 100644
new mode 100755
diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py
index aa325a03ee444..ed19237b0b3e5 100755
--- a/examples/server/api_like_OAI.py
+++ b/examples/server/api_like_OAI.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import argparse
 from flask import Flask, jsonify, request, Response
 import urllib.parse
diff --git a/examples/server/chat-llama2.sh b/examples/server/chat-llama2.sh
old mode 100644
new mode 100755
diff --git a/examples/server/chat.sh b/examples/server/chat.sh
old mode 100644
new mode 100755
diff --git a/gguf.py b/gguf.py
old mode 100644
new mode 100755
index 4657467182328..9421080b80528
--- a/gguf.py
+++ b/gguf.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import shutil
 import sys
 import struct
diff --git a/scripts/get-wikitext-2.sh b/scripts/get-wikitext-2.sh
old mode 100644
new mode 100755
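
Note (not part of the upstream patch): if the patch cannot be applied directly, a rough Python sketch of the equivalent permission change is shown below. The SCRIPTS list is abbreviated to a few entries and would need to be filled in from the diffstat above.

#!/usr/bin/env python3
# Hypothetical helper, not included in commit 7c2227a: reproduce the
# 100644 -> 100755 mode change by propagating each read bit to the
# matching execute bit (equivalent to `chmod +x` on the listed scripts).
import os

SCRIPTS = [
    "ci/run.sh",
    "convert.py",
    "gguf.py",
    # ...remaining files from the diffstat above
]

for path in SCRIPTS:
    mode = os.stat(path).st_mode
    os.chmod(path, mode | ((mode & 0o444) >> 2))

Alternatively, `git update-index --chmod=+x <file>` records the same mode change directly in the index.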