Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
73 changes: 73 additions & 0 deletions local-llama/everything.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
import ctypes
import datetime
import struct

# Everything SDK request flags: bitmask values for Everything_SetRequestFlags,
# selecting which columns the query should return per result.
EVERYTHING_REQUEST_FILE_NAME = 0x00000001
EVERYTHING_REQUEST_PATH = 0x00000002
EVERYTHING_REQUEST_FULL_PATH_AND_FILE_NAME = 0x00000004
EVERYTHING_REQUEST_EXTENSION = 0x00000008
EVERYTHING_REQUEST_SIZE = 0x00000010
EVERYTHING_REQUEST_DATE_CREATED = 0x00000020
EVERYTHING_REQUEST_DATE_MODIFIED = 0x00000040
EVERYTHING_REQUEST_DATE_ACCESSED = 0x00000080
EVERYTHING_REQUEST_ATTRIBUTES = 0x00000100
EVERYTHING_REQUEST_FILE_LIST_FILE_NAME = 0x00000200
EVERYTHING_REQUEST_RUN_COUNT = 0x00000400
EVERYTHING_REQUEST_DATE_RUN = 0x00000800
EVERYTHING_REQUEST_DATE_RECENTLY_CHANGED = 0x00001000
EVERYTHING_REQUEST_HIGHLIGHTED_FILE_NAME = 0x00002000
EVERYTHING_REQUEST_HIGHLIGHTED_PATH = 0x00004000
EVERYTHING_REQUEST_HIGHLIGHTED_FULL_PATH_AND_FILE_NAME = 0x00008000

# Load the 64-bit Everything SDK DLL from a hard-coded local path
# (NOTE(review): machine-specific; consider making this configurable).
everything_dll = ctypes.WinDLL ("D:\\Everything\\SDK\\dll\\Everything64.dll")
# Declare argument/return types so ctypes marshals values correctly:
# the date/size getters take an index and an out-parameter pointer.
everything_dll.Everything_GetResultDateModified.argtypes = [ctypes.c_int,ctypes.POINTER(ctypes.c_ulonglong)]
everything_dll.Everything_GetResultSize.argtypes = [ctypes.c_int,ctypes.POINTER(ctypes.c_ulonglong)]
everything_dll.Everything_GetResultFileNameW.argtypes = [ctypes.c_int]
everything_dll.Everything_GetResultFileNameW.restype = ctypes.c_wchar_p

#setup search
def search(file):
    """Run an Everything query for *file* and return the number of results.

    Bug fix: the original passed the literal string "file" to
    Everything_SetSearchW instead of the `file` argument, so every call
    searched for the word "file" regardless of input.  The result count,
    previously computed and discarded, is now returned for convenience.
    """
    everything_dll.Everything_SetSearchW(file)
    everything_dll.Everything_SetRequestFlags(
        EVERYTHING_REQUEST_FILE_NAME
        | EVERYTHING_REQUEST_PATH
        | EVERYTHING_REQUEST_SIZE
        | EVERYTHING_REQUEST_DATE_MODIFIED
    )
    # Execute the query; the argument is TRUE, which per the Everything SDK
    # blocks until the query completes.
    everything_dll.Everything_QueryW(1)
    return everything_dll.Everything_GetNumResults()


# Result count of the most recent Everything query (0 if none has run yet),
# reported to the user.
num_results = everything_dll.Everything_GetNumResults()
print(f"Result Count: {num_results}")

# FILETIME <-> POSIX conversion helpers.
# A Windows FILETIME counts 100-nanosecond ticks since 1601-01-01; POSIX
# timestamps count seconds since 1970-01-01.
# https://stackoverflow.com/questions/39481221/convert-datetime-back-to-windows-64-bit-filetime
WINDOWS_TICKS = 10_000_000  # FILETIME ticks per second (one tick = 100 ns)
WINDOWS_EPOCH = datetime.datetime(1601, 1, 1)
POSIX_EPOCH = datetime.datetime(1970, 1, 1)
EPOCH_DIFF = (POSIX_EPOCH - WINDOWS_EPOCH).total_seconds()  # 11644473600.0
WINDOWS_TICKS_TO_POSIX_EPOCH = EPOCH_DIFF * WINDOWS_TICKS  # 116444736000000000.0

def get_time(filetime):
    """Convert a little-endian 8-byte FILETIME buffer to datetime.datetime."""
    (ticks,) = struct.unpack('<Q', filetime)
    # Shift from the 1601 epoch to the 1970 epoch, then scale ticks -> seconds.
    seconds_since_epoch = (ticks - WINDOWS_TICKS_TO_POSIX_EPOCH) / WINDOWS_TICKS
    return datetime.datetime.fromtimestamp(seconds_since_epoch)

# Reusable out-parameter buffers for the per-result SDK getters.
filename = ctypes.create_unicode_buffer(260)  # 260 == MAX_PATH
date_modified_filetime = ctypes.c_ulonglong(1)
file_size = ctypes.c_ulonglong(1)

# Print name, modification time, and size for every result of the last query.
for index in range(num_results):
    everything_dll.Everything_GetResultFullPathNameW(index, filename, 260)
    everything_dll.Everything_GetResultDateModified(index, date_modified_filetime)
    everything_dll.Everything_GetResultSize(index, file_size)
    full_path = ctypes.wstring_at(filename)
    modified = get_time(date_modified_filetime)
    print(f"Filename: {full_path}\nDate Modified: {modified}\nSize: {file_size.value} bytes\n")
150 changes: 150 additions & 0 deletions local-llama/llama.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,150 @@
import json

from llama_cpp import Llama
import re
import subprocess
from everytools import EveryTools
# Everything-search wrapper shared by the get_path() tool below.
es = EveryTools()

# Local Llama 3 8B chat model; the chat_format enables OpenAI-style
# tool/function calling via the `tools=` argument used in generate_response.
model = Llama(
"D:/Llama3/Llama3-8B-Chinese-Chat-q8-v2.gguf",
verbose=False,
chat_format="chatml-function-calling", # required for tool_choice/tools support
n_ctx=8192,
n_gpu_layers=-1,
)

# System prompt (Chinese, runtime string — intentionally not translated):
# "You are a local Windows AI assistant able to use tools and chat.  You may
# call get_path (finds file paths) and opr_cmd (runs cmd commands).  If a
# command needs a path, call get_path first, then opr_cmd.  Think step by step."
system_prompt = ("你是一个本地Windows AI助手,能够使用工具完成用户要求,也能用户进行对话。\
注意你可以调用工具\"get_path\"和\"opr_cmd\"来完成用户的要求。\
\"get_path\"能搜索到文件的路径,\
\"opr_cmd\"能执行cmd指令,和电脑应用文件进行交互。 \
注意:如果完成要求的cmd指令需要知道路径,可以先调用get_path来获取路径,再用opr_cmd来执行。 \
请逐步思考。\
")
# NOTE (translated, commented-out prompt line): if the turn is plain chat with
# no tool call, return {arguments: { "message": "your answer"}}

def get_path(file):
    """Search Everything for *file* and return up to three hits as JSON.

    Each hit is a dict containing only the "name" and "path" columns of the
    everytools results table.
    """
    es.search(file)
    hits = []
    for _, record in es.results().iterrows():
        hits.append({
            label: value
            for label, value in record.items()
            if label in ("path", "name")
        })
    return json.dumps(hits[:3])

def opr_cmd(cmd):
    """Run *cmd* through the system shell and return the outcome as JSON.

    Returns a JSON list holding one dict: {"out": stdout} when the command
    exits with status 0, otherwise {"err": stderr}.  Also echoes the result
    to the console for the local user.

    SECURITY NOTE(review): *cmd* comes from LLM tool-call output and runs
    with shell=True — arbitrary command execution is the intended feature
    here, but never expose this function to untrusted input.
    """
    completed = subprocess.run(
        cmd,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    if completed.returncode == 0:
        print("命令执行成功,输出为:")
        print(completed.stdout)
        payload = [{"out": completed.stdout}]
    else:
        print("命令执行失败,错误信息为:")
        print(completed.stderr)
        payload = [{"err": completed.stderr}]
    return json.dumps(payload)

def generate_response(_model, _messages, _tools, _max_tokens=100):
    """Request one chat completion from *_model*, letting it pick tools.

    tool_choice="auto" leaves the decision to call a tool (or answer in
    plain text) to the model.  Returns the raw completion dict.
    """
    completion = _model.create_chat_completion(
        messages=_messages,
        tools=_tools,
        tool_choice="auto",
        max_tokens=_max_tokens,
    )
    return completion

# Dispatch table: tool name from a model tool_call -> local implementation.
tools_mapping = {
"get_path": get_path,
"opr_cmd": opr_cmd
}

def interact_chat():
    """Interactive chat loop: read user input, let the model reply or call tools.

    Runs until the user types "exit".  Tool calls returned by the model are
    dispatched through tools_mapping and their JSON results are appended to
    the conversation so the model can use them on the next turn.
    """
    messages = [
        {
            "role": "system",
            "content": system_prompt,
        },
    ]
    # OpenAI-style tool schemas advertised to the model.
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_path",
                "description": "Get the path of the file",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "file": {
                            "type": "string",
                            "description": "The name of the file, e.g. QQ.exe",
                        }
                    },
                    "required": ["file"],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "opr_cmd",
                "description": "Operate the cmd command and get the output",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "cmd": {
                            "type": "string",
                            "description": "The windows cmd command, e.g. start QQ.exe",
                        }
                    },
                    "required": ["cmd"],
                },
            },
        },
    ]
    user_input = input()
    messages.append({"role": "user", "content": user_input})
    while True:
        response = generate_response(model, messages, tools)
        reply = response["choices"][0]["message"]
        messages.append(reply)
        # Bug fix: tool_calls may be absent or None; the original iterated it
        # unconditionally and crashed with TypeError on plain chat replies.
        tool_calls = reply.get("tool_calls") or []
        if tool_calls:
            for call in tool_calls:
                tool_name = call["function"]["name"]
                tool_args = json.loads(call["function"]["arguments"])
                if tool_name in tools_mapping:
                    tool_response = tools_mapping[tool_name](**tool_args)
                    # NOTE(review): chatml-function-calling may expect role
                    # "tool" for results — kept "assistant" as in the original.
                    messages.append({
                        "role": "assistant",
                        "content": tool_response,
                    })
                else:
                    # Unknown tool name: surface whatever message the model
                    # packed into the arguments instead.
                    print("Windows Assistant: ", tool_args.get("message", None))
        else:
            # Bug fix: plain (non-tool) assistant replies were never printed.
            print("Windows Assistant: ", reply["content"])
        user_input = input()
        if user_input == "exit":
            break
        messages.append({"role": "user", "content": user_input})


interact_chat()

122 changes: 122 additions & 0 deletions local-llama/test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,122 @@
from llama_cpp import Llama
from everytools import EveryTools

import re
import json
import subprocess
# Local Llama 3 model for this experiment script.  No chat_format is set, so
# the tool schemas defined under __main__ are only printed, never passed in.
model = Llama(
"D:/Llama3/Llama3-8B-Chinese-Chat-q8-v2.gguf",
verbose=False,
n_ctx=8192,
n_threads=4,
n_gpu_layers=-1,
)

# System prompt (Chinese runtime string, apparently an unfinished draft):
# "You are an AI assistant, you can call".
system_prompt = "你是一个ai助手,你可以调用"

# Everything-search wrapper used by get_path() below.
es = EveryTools()


def generate_reponse(_model, _messages, _max_tokens=100):
    """Return the assistant text for one chat completion of *_messages*.

    NOTE(review): the name is missing an "s" ("reponse") — kept as-is
    because interact_chat() below calls it by this exact name.
    """
    completion = _model.create_chat_completion(
        _messages,
        temperature=0.2,
        top_p=0.9,
        stop=["<|eot_id|>", "<|end_of_text|>"],
        max_tokens=_max_tokens,
    )
    return completion["choices"][0]["message"]["content"]

def interact_chat():
    """Minimal chat REPL: read a line, print the model's reply, repeat.

    Type "exit" to quit.  The full conversation (system + user + assistant
    turns) is resent to the model on every iteration.
    """
    messages = [
        {
            "role": "system",
            "content": system_prompt,
        },
    ]
    while True:
        user_input = input()
        if user_input == "exit":
            break
        messages.append({"role": "user", "content": user_input})
        reply = generate_reponse(model, messages)
        print("Windows Assistant: ", reply)
        # Bug fix: the original appended the bare reply string to the history;
        # the chat API expects message dicts, so history grew malformed entries.
        messages.append({"role": "assistant", "content": reply})


def get_path(file):
    """Search Everything for *file*; return the first three hits as JSON.

    Each hit keeps only the "name" and "path" columns of the everytools
    results table.
    """
    es.search(file)
    table = es.results()
    rows = [
        {label: value for label, value in record.items() if label in ("path", "name")}
        for _, record in table.iterrows()
    ]
    return json.dumps(rows[:3])


if __name__ == '__main__':
    # Tool schemas in the OpenAI function-calling format.  This script only
    # inspects/prints them; they are never passed to the model.
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_path",
                "description": "Get the path of the file",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "file": {
                            "type": "string",
                            "description": "The name of the file, e.g. QQ.exe",
                        }
                    },
                    "required": ["file"],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "opr_cmd",
                "description": "Operate the cmd command and get the output",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "cmd": {
                            "type": "string",
                            "description": "The windows cmd command, e.g. start QQ.exe",
                        }
                    },
                    "required": ["cmd"],
                },
            },
        },
    ]

    # Debugging output to check the structure of tools
    print("Tools:", tools)

    # Build the ' | '-joined list of quoted function names.
    # Bug fix: pre-initialize function_names so the final print below cannot
    # raise NameError when the join fails before assignment (the original
    # left the name undefined in that case).
    function_names = ""
    try:
        function_names = " | ".join(
            f'"functions.{tool["function"]["name"]}:"' for tool in tools
        )
        print("Function Names:", function_names)
    except TypeError as e:
        print("Encountered a TypeError:", e)
    except KeyError as e:
        print("Encountered a KeyError:", e)

    # Output the generated function names
    print("Generated function names:", function_names)