diff --git a/.fdignore b/.fdignore new file mode 100644 index 0000000..f821591 --- /dev/null +++ b/.fdignore @@ -0,0 +1,4 @@ +.* +*.{png,jpeg,jpg,gif,lock} +LICENSE +docs \ No newline at end of file diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 0000000..57f9f81 --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1,6 @@ +!.gitignore +!* +!*/* +cache_db.json +cache_tree.json +vector_cache diff --git a/docs/cache_title.json b/docs/cache_title.json new file mode 100644 index 0000000..e748102 --- /dev/null +++ b/docs/cache_title.json @@ -0,0 +1 @@ +{"_default": {"1": {"path": "/README.md", "hash": "3f69fa5b832ce44cb6f857807345f1da", "title": "Monte Carlo Tree of Thoughts: AI Collaboration Tool"}, "2": {"path": "/README.md:1-9", "hash": "8e275167dc57fe88612f20ef61a4f501", "title": "Social Media Sharing Buttons"}, "3": {"path": "/README.md:9-12", "hash": "8c5d0c97a34933771504d8f508eae4ba", "title": "Sharing Options for Tree-of-Thoughts Project"}, "4": {"path": "/README.md:12-46", "hash": "5048d50323ad64d849a5c76437a20618", "title": "Boost Model Reasoning with ToT Algorithm"}, "5": {"path": "/README.md:48-96", "hash": "1b100a6455c06c5c6ee090564a377b3e", "title": "Generating Thoughts with Code"}, "6": {"path": "/README.md:97-140", "hash": "135b10f7c25fdc536b57af4f12ce312c", "title": "Monte Carlo Tree of Thoughts Algorithm"}, "7": {"path": "/README.md:141-162", "hash": "4b917dd2db19d74fc19fe12006f63c5c", "title": "Collaborative Problem Solving Tree"}, "8": {"path": "/example.py", "hash": "1fa61a58ff62646788e49615b202e11e", "title": "Monte Carlo Tree Search with OpenAI Model"}, "9": {"path": "/example.py:1-45", "hash": "7bc34f7463b2a62384e9889a03017b50", "title": "OpenAI Thoughts Tree Solver"}, "10": {"path": "/example.py:46-53", "hash": "2c0fa1d5700157e07aec617ae66b916b", "title": "Object Initialization and Printing Solution"}, "11": {"path": "/prompts.txt", "hash": "fb59eb0ae04b455b75a7b5e5cd47e5cf", "title": "Collaborative Tree-of-Thoughts Q&A"}, "12": {"path": "/prompts.txt:1-5", "hash": "46385a5275420768e0ab4d1eca00854c", "title": "Experts' Collaborative Thought Process"}, "13": {"path": "/prompts.txt:5-9", "hash": "43897a6a6d81b61ea3f8c07b46dff393", "title": "Collaborative Tree of Thoughts Problem Solving"}, "14": {"path": "/prompts.txt:9-9", "hash": "8d8aa5847bcc4f74fa4a8334d4205a3b", "title": "Collaborative Problem-Solving Process"}, "15": {"path": "/pyproject.toml", "hash": "050e15399ac4e7a3609dbc783c368df4", "title": "Configure Pyproject.toml for Python"}, "16": {"path": "/pyproject.toml:1-41", "hash": "d2050aaa182b4c9b3b9614b6804e97cd", "title": "Pyproject.toml: Project Configurations"}, "17": {"path": "/pyproject.toml:42-53", "hash": "9d0a86e28e6bbe1885eb681b972d6ece", "title": "Configuring Ruff and Black for Linting"}, "18": {"path": "/requirements.txt", "hash": "78ed0180d1b6a55b55034e16c55c15cd", "title": "Importing NLP, AI, and Distributed Libraries"}, "19": {"path": "/tree_of_thoughts/README.md", "hash": "ff7a130fd1bb629734f548269766c35e", "title": "Enhanced Tree of Thoughts with Advanced Search Algos and Optimized Classes"}, "20": {"path": "/tree_of_thoughts/README.md:1-36", "hash": "0c20ddae621567733f94719c15b920eb", "title": "Changelog: TreeofThoughts Refactoring"}, "21": {"path": "/tree_of_thoughts/README.md:38-60", "hash": "8d22d68805128ebce69655bc1f5ee94a", "title": "TreeofThoughts: Tree-based Search Algorithm for Problem Solving"}, "22": {"path": "/tree_of_thoughts/README.md:61-77", "hash": "4bcef7245c63d43889d734a83d10dc59", "title": "Algorithmic Search 
Methods"}, "23": {"path": "/tree_of_thoughts/README.md:78-94", "hash": "38e0b2d7fb1ec873627b383abbb88cfa", "title": "Pruning Threshold in Search Algorithms"}, "24": {"path": "/tree_of_thoughts/README.md:94-107", "hash": "01588e502c3ca91e8120e0feb5047d24", "title": "Recursive Depth-First Search Algorithm"}, "25": {"path": "/tree_of_thoughts/README.md:109-127", "hash": "a25b4fd82f563deef7cc89d292329746", "title": "Tree of Thoughts Search Algorithm Implementation"}, "26": {"path": "/tree_of_thoughts/README.md:128-141", "hash": "71910bade4edcc4a019b424e0c624e92", "title": "Tree of Thoughts Algorithms: BFS, DFS, Best-First"}, "27": {"path": "/tree_of_thoughts/README.md:143-154", "hash": "4597b482a6e6d2f1ae87885b82f3f956", "title": "Tree of Thoughts Search Algorithms"}, "28": {"path": "/tree_of_thoughts/README.md:156-169", "hash": "cfd5e1c094266b414820f82e359e3a3a", "title": "Optimized Tree of Thoughts: Monte Carlo and Solve Algorithms"}, "29": {"path": "/tree_of_thoughts/README.md:169-169", "hash": "47d902c94b055dd2bacddf80155c4e16", "title": "Efficient Search Algorithm with Custom Parameters"}, "30": {"path": "/tree_of_thoughts/__init__.py", "hash": "44cbcc38a86a5c66b8df67b08117cf9a", "title": "Initializing ThoughtTree Models"}, "31": {"path": "/tree_of_thoughts/base.py", "hash": "84e8ec14fd75a979ef744826b5fb719c", "title": "Abstract Base Class for Language Models"}, "32": {"path": "/tree_of_thoughts/huggingface_model.py", "hash": "a08a7f855f90cfe27917d866e1028ece", "title": "HuggingFace Model Class and Function"}, "33": {"path": "/tree_of_thoughts/huggingface_model.py:1-31", "hash": "ab0cb88cd6a5742cbbf292a15056bf9b", "title": "Generate Thoughts with Huggingface Model"}, "34": {"path": "/tree_of_thoughts/huggingface_model.py:33-58", "hash": "0d318a6ef88d60f78d399a553cc8f709", "title": "Model-based State Evaluation"}, "35": {"path": "/tree_of_thoughts/huggingface_model.py:59-64", "hash": "381481be764fb144e516ce7dc262bbe2", "title": "Error Handling and Resetting State Values"}, "36": {"path": "/tree_of_thoughts/openai_models.py", "hash": "fe9df4cd0950de94de893cfbe3a96346", "title": "OpenAI Chat API Generative Model"}, "37": {"path": "/tree_of_thoughts/openai_models.py:1-34", "hash": "2fb51d117024499333e0575892f6d8db", "title": "OpenAI Language Model Class"}, "38": {"path": "/tree_of_thoughts/openai_models.py:35-64", "hash": "669ed508f0828dde6ff11cbdeb239038", "title": "OpenAI Chat API Integration for ReAct Prompting"}, "39": {"path": "/tree_of_thoughts/openai_models.py:65-92", "hash": "ec2dfb612c577390f71d2d517160f35f", "title": "Generate Thoughts using OpenAI Models"}, "40": {"path": "/tree_of_thoughts/openai_models.py:92-110", "hash": "948d215897a26dc3ba3867b47043e1df", "title": "Reinforcement Learning from Rejected Solutions"}, "41": {"path": "/tree_of_thoughts/openai_models.py:110-122", "hash": "34cb9cc3eca232f08be0f1f557b81298", "title": "Error-Avoiding Generative Solution"}, "42": {"path": "/tree_of_thoughts/openai_models.py:123-146", "hash": "2d8e5615b305961a4e4a1c2c00f9efa7", "title": "Evaluating States with Value Strategy"}, "43": {"path": "/tree_of_thoughts/openai_models.py:147-168", "hash": "545477a43a8b81ebe51e318ac03fe987", "title": "OpenAI Model: Thought Evaluation"}, "44": {"path": "/tree_of_thoughts/openai_models.py:169-186", "hash": "f90ae75f8f52c16d465a092d223b236a", "title": "OpenAI API for State Evaluation"}, "45": {"path": "/tree_of_thoughts/treeofthoughts.py", "hash": "cefe87e87192c3f2fa7ec5c14830168d", "title": "Multi-search Algorithms Tree of Thoughts"}, "46": {"path": 
"/tree_of_thoughts/treeofthoughts.py:1-40", "hash": "0727ba1a1d0fb1810595ddf421c32cf0", "title": "Tree of Thoughts Class and Methods"}, "47": {"path": "/tree_of_thoughts/treeofthoughts.py:41-79", "hash": "8fb670938b8e5f02e616fe30c07b2227", "title": "Tree of Thoughts BFS Algorithm"}, "48": {"path": "/tree_of_thoughts/treeofthoughts.py:80-100", "hash": "e5708e6eb44525e657936c6fe32f25f3", "title": "Concurrent Thought Evaluation"}, "49": {"path": "/tree_of_thoughts/treeofthoughts.py:102-122", "hash": "e71d592c6701a114d9fd610f2b20331e", "title": "Pruning Threshold Adjustment"}, "50": {"path": "/tree_of_thoughts/treeofthoughts.py:124-139", "hash": "698a682252a2f950057a4d23c05ddd83", "title": "Filtering and Logging State Values"}, "51": {"path": "/tree_of_thoughts/treeofthoughts.py:141-172", "hash": "0abb0c96337f8a7d02bdbdb693a9a41d", "title": "Error-Handled Solution Finder"}, "52": {"path": "/tree_of_thoughts/treeofthoughts.py:173-203", "hash": "8a7fbd18bbf3a2c8c4f02eb32d086a4a", "title": "Depth-First Thought Evaluation"}, "53": {"path": "/tree_of_thoughts/treeofthoughts.py:204-233", "hash": "00525e96d74f65ef26b4a89b11b5b4aa", "title": "Best-First Search Algorithm with Priority Queue"}, "54": {"path": "/tree_of_thoughts/treeofthoughts.py:234-265", "hash": "8612bf16b44208366563731c42d34874", "title": "Tree of Thoughts: Thought Generation and Evaluation Algorithm"}, "55": {"path": "/tree_of_thoughts/treeofthoughts.py:266-296", "hash": "024cf99da78c85faa5902c55f190d1dd", "title": "A* Algorithm for Optimized Thought Generation"}, "56": {"path": "/tree_of_thoughts/treeofthoughts.py:297-329", "hash": "e9db49ef2ec31d4483eba4de04cec9bc", "title": "Graph Search Algorithm: Priority Queue and Path Reconstruction"}, "57": {"path": "/tree_of_thoughts/treeofthoughts.py:330-356", "hash": "d0f0443c1e2e9d5f68187222db476988", "title": "Optimizing Tree-Based State Scores"}, "58": {"path": "/tree_of_thoughts/treeofthoughts.py:357-384", "hash": "44ac31f3bcef9c9848363801e05db485", "title": "Evaluation-Guided Path Reconstruction"}, "59": {"path": "/tree_of_thoughts/treeofthoughts.py:385-416", "hash": "76872912011cf846b53941a43d78445e", "title": "Adjustable Tree of Thoughts Algorithm"}, "60": {"path": "/tree_of_thoughts/treeofthoughts.py:417-452", "hash": "fc1d14ac34ddc78c91d470d56fc2d5db", "title": "Monte Carlo Thought Generation"}, "61": {"path": "/tree_of_thoughts/treeofthoughts.py:453-478", "hash": "250c813fb73119cdd43a5e83347190c3", "title": "Transposition Table Update and UCB1 Calculation"}, "62": {"path": "/tree_of_thoughts/treeofthoughts.py:479-503", "hash": "dc2cd16def469d5206822180578c3efe", "title": "Tree Model State Selection"}, "63": {"path": "/tree_of_thoughts/treeofthoughts.py:505-507", "hash": "fd0880243f8e81fb02ec66f44b9dc37b", "title": "Valid Solution Check"}}} \ No newline at end of file diff --git a/docs/codeview.html b/docs/codeview.html new file mode 100755 index 0000000..761065f --- /dev/null +++ b/docs/codeview.html @@ -0,0 +1,669 @@ + + + + + + + + + Code View + + + + + + + + + + + + + + + + + + + + +
+ <!-- "Code View" / "Code Preview" viewer page; remaining markup and scripts not recoverable -->
+ + + \ No newline at end of file diff --git a/docs/data/0.json b/docs/data/0.json new file mode 100644 index 0000000..017fc4c --- /dev/null +++ b/docs/data/0.json @@ -0,0 +1,541 @@ +{ + "0": { + "file_id": 0, + "content": "/README.md", + "type": "filepath" + }, + "1": { + "file_id": 0, + "content": "This code offers social media sharing tools for promoting the \"tree-of-thoughts\" project, which combines the Tree of Thoughts algorithm, OpenAI Language Model API, and user model connections to improve AI reasoning. It initializes a Monte Carlo Tree of Thoughts model, sets up an initial prompt, solves problems using the algorithm with specific parameters, and prints collaborative solutions.", + "type": "summary" + }, + "2": { + "file_id": 0, + "content": "[![Multi-Modality](agorabanner.png)](https://discord.gg/qUtxnK2NMf)\n![Tree of Thoughts Banner](treeofthoughts.png)\n![Discord](https://img.shields.io/discord/999382051935506503)\n[![Twitter](https://img.shields.io/twitter/url?style=social&url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts)](https://twitter.com/intent/tweet?text=Check%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts!%20https://github.com/kyegomez/tree-of-thoughts)\n[![LinkedIn](https://img.shields.io/badge/Share-LinkedIn-blue?style=social&logo=linkedin)](https://www.linkedin.com/sharing/share-offsite/?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts)\n[![Facebook](https://img.shields.io/badge/Share-Facebook-blue?style=social&logo=facebook)](https://www.facebook.com/sharer/sharer.php?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts)\n[![Reddit](https://img.shields.io/badge/Share-Reddit-orange?style=social&logo=reddit)](https://www.reddit.com/submit?url=https%3A%", + "type": "code", + "location": "/README.md:1-9" + }, + "3": { + "file_id": 0, + "content": "This code is a README file that includes various sharing buttons for social media platforms like Discord, Twitter, LinkedIn, Facebook, and Reddit. These buttons allow users to easily share the project link with their networks.", + "type": "comment" + }, + "4": { + "file_id": 0, + "content": "2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts&title=Check%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts%21)\n[![Hacker News](https://img.shields.io/badge/Share-Hacker%20News-orange?style=social&logo=y-combinator)](https://news.ycombinator.com/submitlink?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts&t=Check%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts%21)\n[![Pinterest](https://img.shields.io/badge/Share-Pinterest-red?style=social&logo=pinterest)](https://pinterest.com/pin/create/button/?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts&media=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts%2Fraw%2Fmain%2Ftree-of-thoughts.jpeg&description=Check%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts%21)\n[![WhatsApp](https://img.shields.io/badge/Share-WhatsApp-green?style=social&logo=whatsapp)](https://api.whatsapp.com/send?text=Check", + "type": "code", + "location": "/README.md:9-12" + }, + "5": { + "file_id": 0, + "content": "Code snippet provides sharing options for the \"tree-of-thoughts\" project on various social media platforms like Hacker News, Pinterest, and WhatsApp using badges. 
The links are prefilled with a message to promote the project's purpose: improving AI reasoning through the Tree of Thoughts.", + "type": "comment" + }, + "6": { + "file_id": 0, + "content": "%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts%21%20https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts)\n[Paper link](https://arxiv.org/pdf/2305.10601.pdf)\n[Author's implementation](https://github.com/princeton-nlp/tree-of-thought-llm)\n## Introduction\nTree of Thoughts (ToT) is a powerful and flexible algorithm that significantly advances model reasoning by up to 70%. This plug-and-play version allows you to connect your own models and experience superintelligence!\n## Install\n```bash\npip install tree-of-thoughts\n```\n## Usage\n```python\nimport os\nfrom tree_of_thoughts.openai_models import OpenAILanguageModel\nfrom tree_of_thoughts.treeofthoughts import MonteCarloTreeofThoughts\nfrom dotenv import load_dotenv\nload_dotenv()\napi_key = os.environ.get(\"OPENAI_API_KEY\")\n# Initialize the OpenAILanguageModel class with the API key\nmodel = OpenAILanguageModel(api_key=api_key)\n# Initialize the MonteCarloTreeofThoughts class with the model\ntree_of_thoughts = MonteCarloTreeofThoughts(model)", + "type": "code", + "location": "/README.md:12-46" + }, + "7": { + "file_id": 0, + "content": "This code installs the Tree of Thoughts algorithm, which significantly improves model reasoning by up to 70%. Users can connect their own models and experience superintelligence. The code imports necessary classes, initializes an OpenAILanguageModel with an API key, and initializes a MonteCarloTreeofThoughts class with the model for improved reasoning.", + "type": "comment" + }, + "8": { + "file_id": 0, + "content": "# Define the initial prompt\ninitial_prompt = \"\"\"\nInput: 2 8 8 14\nPossible next steps:\n2 + 8 = 10 (left: 8 10 14)\n8 / 2 = 4 (left: 4 8 14)\n14 + 2 = 16 (left: 8 8 16)\n2 * 8 = 16 (left: 8 14 16)\n8 - 2 = 6 (left: 6 8 14)\n14 - 8 = 6 (left: 2 6 8)\n14 / 2 = 7 (left: 7 8 8)\n14 - 2 = 12 (left: 8 8 12)\nInput: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation\nPossible next steps:\n\"\"\"\n# Define the number of thoughts to generate\nnum_thoughts = 1\nmax_steps = 3\nmax_states = 4\npruning_threshold = 0.5\n# Generate the thoughts\nsolution = tree_of_thoughts.solve(\n initial_prompt=initial_prompt,\n num_thoughts=num_thoughts,\n max_steps=max_steps,\n max_states=max_states,\n pruning_threshold=pruning_threshold,\n # sleep_time=sleep_time\n)\nprint(f\"Solution: {solution}\")\n```\n### ToT with HF LLM\nTo run Hugging Face Transformers with Tree of Thoughts:\n```python\nfrom tree_of_thoughts import TreeofThoughts, HuggingLanguageModel, MonteCarloTreeofThoughts\nmodel_name=\"01-ai/Yi-34B\"\nmodel = HuggingLanguageModel(model_name, ", + "type": "code", + "location": "/README.md:48-96" + }, + "9": { + "file_id": 0, + "content": "The code is initializing the initial prompt, defining parameters for generating thoughts (e.g., number of thoughts, maximum steps and states), and setting a pruning threshold for tree search. It then calls the solve function from tree_of_thoughts module to generate thoughts using the Hugging Face Transformers language model with the given parameters. 
The solution is printed at the end.", + "type": "comment" + }, + "10": { + "file_id": 0, + "content": " model_tokenizer=model_name, \n verbose=True)\n# Initialize the MonteCarloTreeofThoughts class with the model\ntree_of_thoughts = MonteCarloTreeofThoughts(model)\n# Note to reproduce the same results from the tree of thoughts paper if not better, \n# craft an 1 shot chain of thought prompt for your task below\ninitial_prompt = \"\"\"\nInput: 2 8 8 14\nPossible next steps:\n2 + 8 = 10 (left: 8 10 14)\n8 / 2 = 4 (left: 4 8 14)\n14 + 2 = 16 (left: 8 8 16)\n2 * 8 = 16 (left: 8 14 16)\n8 - 2 = 6 (left: 6 8 14)\n14 - 8 = 6 (left: 2 6 8)\n14 / 2 = 7 (left: 7 8 8)\n14 - 2 = 12 (left: 8 8 12)\nInput: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation\nPossible next steps:\n\"\"\"\nnum_thoughts = 1\nmax_steps = 3\nmax_states = 4\npruning_threshold = 0.5\nsolution = tree_of_thoughts.solve(\n initial_prompt=initial_prompt,\n num_thoughts=num_thoughts, \n max_steps=max_steps, \n max_states=max_states, \n pruning_threshold=pruning_threshold,\n # sleep_time=sleep_time", + "type": "code", + "location": "/README.md:97-140" + }, + "11": { + "file_id": 0, + "content": "The code initializes a MonteCarloTreeofThoughts model, sets up an initial prompt, and then solves the problem using the Tree of Thoughts algorithm with specified parameters.", + "type": "comment" + }, + "12": { + "file_id": 0, + "content": ")\nprint(f\"Solution: {solution}\")\n```\n### Basic Prompts\n- Copy and paste this into your llm!\n```\n\"Three experts with exceptional logical thinking skills are collaboratively answering a question using the tree of thoughts method. Each expert will share their thought process in detail, taking into account the previous thoughts of others and admitting any errors. They will iteratively refine and expand upon each other's ideas, giving credit where it's due. The process continues until a conclusive answer is found. Organize the entire response in a markdown table format. The task is:\n```\n# Acknowledgements\nThanks to: Shunyu Yao Princeton University, Dian Yu Google DeepMind, Jeffrey Zhao, Google DeepMind, Izhak Shafran Google DeepMind, Thomas L. 
Griffiths, Princeton University, Yuan Cao Google DeepMind, Karthik Narasimha, Princeton University for sharing this amazing work with the world!\nAnd, thanks to Phil Wang or Lucidrains for inspiring me to devote myself to open source AI Research\n# License\nApache", + "type": "code", + "location": "/README.md:141-162" + }, + "13": { + "file_id": 0, + "content": "Code snippet for printing the solution obtained after solving a problem collaboratively using the tree of thoughts method.", + "type": "comment" + }, + "14": { + "file_id": 1, + "content": "/example.py", + "type": "filepath" + }, + "15": { + "file_id": 1, + "content": "The code imports OpenAI language model, uses Monte Carlo Tree of Thoughts algorithm to solve a problem defined by the initial prompt, and prints the solution with specified parameters.", + "type": "summary" + }, + "16": { + "file_id": 1, + "content": "import os\nfrom tree_of_thoughts.openai_models import OpenAILanguageModel\nfrom tree_of_thoughts.treeofthoughts import MonteCarloTreeofThoughts\nfrom dotenv import load_dotenv\nload_dotenv()\napi_key = os.environ.get(\"OPENAI_API_KEY\")\n# Initialize the OpenAILanguageModel class with the API key\nmodel = OpenAILanguageModel(api_key=api_key)\n# Initialize the MonteCarloTreeofThoughts class with the model\ntree_of_thoughts = MonteCarloTreeofThoughts(model)\n# Define the initial prompt\ninitial_prompt = \"\"\"\nInput: 2 8 8 14\nPossible next steps:\n2 + 8 = 10 (left: 8 10 14)\n8 / 2 = 4 (left: 4 8 14)\n14 + 2 = 16 (left: 8 8 16)\n2 * 8 = 16 (left: 8 14 16)\n8 - 2 = 6 (left: 6 8 14)\n14 - 8 = 6 (left: 2 6 8)\n14 / 2 = 7 (left: 7 8 8)\n14 - 2 = 12 (left: 8 8 12)\nInput: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation\nPossible next steps:\n\"\"\"\n# Define the number of thoughts to generate\nnum_thoughts = 1\nmax_steps = 3\nmax_states = 4\npruning_threshold = 0.5\n# Generate the thoughts\nsolution = tree_of_thoughts.solve(\n initial_prompt=initial_prompt,", + "type": "code", + "location": "/example.py:1-45" + }, + "17": { + "file_id": 1, + "content": "Code imports necessary packages and initializes an OpenAI language model, Monte Carlo Tree of Thoughts algorithm, sets initial prompt and parameters for generating thoughts. It then uses the Tree of Thoughts algorithm to solve a problem defined by the initial prompt.", + "type": "comment" + }, + "18": { + "file_id": 1, + "content": " num_thoughts=num_thoughts,\n max_steps=max_steps,\n max_states=max_states,\n pruning_threshold=pruning_threshold,\n # sleep_time=sleep_time\n)\nprint(f\"Solution: {solution}\")", + "type": "code", + "location": "/example.py:46-53" + }, + "19": { + "file_id": 1, + "content": "This code snippet is initializing an object with parameters, and then printing the solution. The variables represent the number of thoughts, maximum steps, maximum states, pruning threshold (likely for optimization), and possibly a sleep time (though commented out).", + "type": "comment" + }, + "20": { + "file_id": 2, + "content": "/prompts.txt", + "type": "filepath" + }, + "21": { + "file_id": 2, + "content": "The code details a tree-of-thoughts approach for collaboratively answering a question through iterative refinement and markdown table formatted response.", + "type": "summary" + }, + "22": { + "file_id": 2, + "content": "Imagine three different experts are answering this question. All experts will write down 1 step of their thinking, then share it with the group. Then all experts will go on to the next step, etc. 
If any expert realises they're wrong at any point then they leave. The question is...\nSimulate three brilliant, logical experts collaboratively answering a question. Each one verbosely explains their thought process in real-time, considering the prior explanations of others and openly acknowledging mistakes. At each step, whenever possible, each expert refines and builds upon the thoughts of others, acknowledging their contributions. They continue until there is a definitive answer to the question. For clarity, your entire response should be in a markdown table. The question is...\nImagine three highly intelligent experts working together to answer a question. They will follow a tree of thoughts approach, where each expert shares their thought process step by step. They will consider the input fr", + "type": "code", + "location": "/prompts.txt:1-5" + }, + "23": { + "file_id": 2, + "content": "The code describes a simulation of three brilliant experts collaboratively answering a question in a tree-of-thoughts approach, sharing their thought process step by step while considering and building upon each other's thoughts.", + "type": "comment" + }, + "24": { + "file_id": 2, + "content": "om others, refine their thoughts, and build upon the group's collective knowledge. If an expert realizes their thought is incorrect, they will acknowledge it and withdraw from the discussion. Continue this process until a definitive answer is reached. Present the entire response in a markdown table. The question is...\nThree experts with exceptional logical thinking skills are collaboratively answering a question using a tree of thoughts method. Each expert will share their thought process in detail, taking into account the previous thoughts of others and admitting any errors. They will iteratively refine and expand upon each other's ideas, giving credit where it's due. The process continues until a conclusive answer is found. Organize the entire response in a markdown table format. The question is...\nEnvision a group of three experts working in unison to tackle a question by employing a tree of thoughts strategy. Each expert will thoroughly explain their line of thinking at every step, w", + "type": "code", + "location": "/prompts.txt:5-9" + }, + "25": { + "file_id": 2, + "content": "Code describes a collaborative problem-solving process where three experts employ the tree of thoughts method to answer a question. They iteratively refine and expand upon each other's ideas, admit errors, and credit others. The final response will be presented in a markdown table format.", + "type": "comment" + }, + "26": { + "file_id": 2, + "content": "hile also considering the insights provided by their peers. They will openly recognize any mistakes and build upon the group's shared understanding. This iterative process will continue until a definitive solution is reached. Structure the entire response as a markdown table. The question is...", + "type": "code", + "location": "/prompts.txt:9-9" + }, + "27": { + "file_id": 2, + "content": "Code snippet describes a collaborative problem-solving process where team members contribute ideas, recognize mistakes, and iterate until reaching a solution. 
The response should be formatted as a markdown table.", + "type": "comment" + }, + "28": { + "file_id": 3, + "content": "/pyproject.toml", + "type": "filepath" + }, + "29": { + "file_id": 3, + "content": "The given code configures the \"pyproject.toml\" file to define project details, specify dependencies, and set up linting and formatting for Python code using Ruff and Black. It enables aggressive mode, ignores certain files, and previews changes before applying them.", + "type": "summary" + }, + "30": { + "file_id": 3, + "content": "[tool.poetry]\nname = \"tree-of-thoughts\"\nversion = \"0.3.9\"\ndescription = \"Tree of Thoughts - Pytorch\"\nauthors = [\"Kye Gomez \"]\nlicense = \"MIT\"\nreadme = \"README.md\" # Assuming you have a README.md file\nhomepage = \"https://github.com/kyegomez/tree-of-thoughts\"\nkeywords = [\"artificial intelligence\", \"deep learning\", \"optimizers\", \"Prompt Engineering\"]\nclassifiers = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.6\",\n]\n[tool.poetry.dependencies]\npython = \"^3.6\"\ntransformers = \"*\"\nswarms = \"*\"\n[tool.poetry.dev-dependencies]\n[build-system]\nrequires = [\"poetry-core>=1.0.0\"]\nbuild-backend = \"poetry.core.masonry.api\"\n[tool.poetry.group.lint.dependencies]\nruff = \"^0.0.249\"\ntypes-toml = \"^0.10.8.1\"\ntypes-redis = \"^4.3.21.6\"\ntypes-pytz = \"^2023.3.0.0\"\nblack = \"^23.1.0\"\ntypes-chardet = \"^5.0.4.6\"\nmypy-protobuf = \"^3.0.0\"\n[tool.autopep8]\nmax_line_length = 80", + "type": "code", + "location": "/pyproject.toml:1-41" + }, + "31": { + "file_id": 3, + "content": "This code defines a project's details in the pyproject.toml file, including name, version, author, license, keywords, and classifiers. It also specifies dependencies and development dependencies for the project.", + "type": "comment" + }, + "32": { + "file_id": 3, + "content": "ignore = \"E501,W6\" # or [\"E501\", \"W6\"]\nin-place = true\nrecursive = true\naggressive = 3\n[tool.ruff]\nline-length = 80\n[tool.black]\nline-length = 80\ntarget-version = ['py38']\npreview = true", + "type": "code", + "location": "/pyproject.toml:42-53" + }, + "33": { + "file_id": 3, + "content": "The code configures the \"pyproject.toml\" file for linting and formatting Python code using Ruff and Black. It sets an ignore list, enables in-place correction, recursive scanning, and aggressive mode with a threshold of 3. The line length is set to 80, target version is py38, and previews changes before applying them.", + "type": "comment" + }, + "34": { + "file_id": 4, + "content": "/requirements.txt", + "type": "filepath" + }, + "35": { + "file_id": 4, + "content": "The code snippet is importing four libraries - transformers, openai, langchain, and swarms. These libraries are commonly used for natural language processing (transformers), text generation (openai), working with language models (langchain), and distributed computing tasks (swarms).", + "type": "summary" + }, + "36": { + "file_id": 4, + "content": "transformers\nopenai\nlangchain\nswarms", + "type": "code", + "location": "/requirements.txt:1-4" + }, + "37": { + "file_id": 4, + "content": "The code snippet is importing four libraries - transformers, openai, langchain, and swarms. 
These libraries are commonly used for natural language processing (transformers), text generation (openai), working with language models (langchain), and distributed computing tasks (swarms).", + "type": "comment" + }, + "38": { + "file_id": 5, + "content": "/tree_of_thoughts/README.md", + "type": "filepath" + }, + "39": { + "file_id": 5, + "content": "The code updates the TreeofThoughts class's changelog, adds search parameters, and includes BFS, DFS, Monte Carlo, Best First Search, and A* Search classes. The code features two inherited classes: MonteCarloTreeofThoughts for Monte Carlo Tree Search optimization and OptimizedTreeofThoughts for enhanced Tree of Thoughts algorithm using optimized search parameters.", + "type": "summary" + }, + "40": { + "file_id": 5, + "content": "# Comprehensive Documentation and Changelog\nThis document provides a comprehensive overview of the changes made to the TreeofThoughts class and its methods to improve readability and understandability. The changes include updating variable names to be more meaningful and descriptive, as well as modifying the structure of the code for better readability.\n## Changelog\n1. TreeofThoughts Class\nUpdated the class definition to include a more descriptive docstring.\n2. __init__ Method\nNo changes were made to the __init__ method.\n3. solve Method\nUpdated variable names:\nx -> initial_prompt\nk -> num_thoughts\nT -> max_steps\nb -> max_states\nvth -> value_threshold\n4. tot_bfs Method\nUpdated variable names:\nx -> initial_prompt\nk -> num_thoughts\nT -> max_steps\nb -> max_states\nS0 -> current_states\nS0_t -> generated_states\nVt -> state_values\nSt -> selected_states\n5. tot_dfs Method\nUpdated variable names:\nx -> initial_prompt\nk -> num_thoughts\nT -> max_steps\nvth -> value_threshold\ns -> state\nt -> step\ns_prime -> next_state\nchild -> child_state", + "type": "code", + "location": "/tree_of_thoughts/README.md:1-36" + }, + "41": { + "file_id": 5, + "content": "The code provides a comprehensive changelog for the TreeofThoughts class, updating variable names and modifying code structure for better readability. The solve method updated variable names while keeping the __init__ method unchanged. Various methods have been renamed and updated with more meaningful variable names.", + "type": "comment" + }, + "42": { + "file_id": 5, + "content": "### Added optional parameters for better control over the search process:\npruning_threshold\nconfidence_threshold\nmax_iterations\nconvergence_threshold\nconvergence_count\n6. save_tree_to_json Method\nNo changes were made to the save_tree_to_json method.\n7. print_tree Method\nNo changes were made to the print_tree method.\n# Documentation\nTreeofThoughts Class\nThe TreeofThoughts class is designed to solve problems using a tree-based search algorithm. It takes a model and a search algorithm (either 'BFS' or 'DFS') as input and provides methods to solve problems using the chosen algorithm.\n## Initialization\nThe __init__ method initializes the TreeofThoughts class with the given model and search algorithm. It also initializes an empty tree structure to store the search results.\n## Solve Method\nThe solve method is the main entry point for solving problems using the TreeofThoughts class. 
It takes the following parameters:\ninitial_prompt: The initial problem or prompt to be solved.\nnum_thoughts: The number of thoughts to generate at each step.", + "type": "code", + "location": "/tree_of_thoughts/README.md:38-60" + }, + "43": { + "file_id": 5, + "content": "Code snippet outlines the initialization, main entry point for solving problems (solve method), and optional parameters added for better control over the search process. No changes were made to save_tree_to_json or print_tree methods. The code introduces TreeofThoughts class which utilizes a tree-based search algorithm to solve problems using given model and search algorithm.", + "type": "comment" + }, + "44": { + "file_id": 5, + "content": "max_steps: The maximum number of steps to perform in the search.\nmax_states: The maximum number of states to consider at each step (for BFS).\nvalue_threshold: The threshold value for pruning states (for DFS).\ntimeout: The maximum time allowed for the search process.\nconfidence_threshold: The confidence threshold for stopping the search.\nmax_iterations: The maximum number of iterations allowed for the search.\nconvergence_threshold: The threshold for determining convergence.\nconvergence_count: The number of consecutive convergences required to stop the search.\nBased on the chosen search algorithm, the solve method calls either the tot_bfs or tot_dfs method to perform the search.\n## tot_bfs Method\nThe tot_bfs method performs a breadth-first search to solve the problem. It takes the following parameters:\ninitial_prompt: The initial problem or prompt to be solved.\nnum_thoughts: The number of thoughts to generate at each step.\nmax_steps: The maximum number of steps to perform in the search.\nmax_states: The maximum number of states to consider at each step.", + "type": "code", + "location": "/tree_of_thoughts/README.md:61-77" + }, + "45": { + "file_id": 5, + "content": "The code defines maximum search parameters and uses either BFS or DFS to solve the problem based on the chosen algorithm. The tot_bfs method performs a breadth-first search with given parameters, while the solve method calls the appropriate search method depending on the chosen algorithm.", + "type": "comment" + }, + "46": { + "file_id": 5, + "content": "pruning_threshold: The threshold value for pruning states.\nThe method generates and evaluates states at each step, selecting the best states based on their values. The search continues until the maximum number of steps is reached, and the best state is returned.\n## tot_dfs Method\nThe tot_dfs method performs a depth-first search to solve the problem. It takes the following parameters:\ninitial_prompt: The initial problem or prompt to be solved.\nnum_thoughts: The number of thoughts to generate at each step.\nmax_steps: The maximum number of steps to perform in the search.\nvalue_threshold: The threshold value for pruning states.\npruning_threshold: The threshold value for pruning states based on their values.\nconfidence_threshold: The confidence threshold for stopping the search.\nmax_iterations: The maximum number of iterations allowed for the search.\nconvergence_threshold: The threshold for determining convergence.\nconvergence_count: The number of consecutive convergences required to stop the search.\nTh", + "type": "code", + "location": "/tree_of_thoughts/README.md:78-94" + }, + "47": { + "file_id": 5, + "content": "The `pruning_threshold` is a value used to prune states during the search process. 
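As an aside, here is a minimal sketch of how a pruning threshold of this kind typically gates evaluated states; the state scores below are invented for illustration and are not taken from the package:

```python
# Hypothetical state scores, invented for illustration.
state_values = {"8 10 14": 0.7, "4 8 14": 0.3, "8 8 16": 0.55}
pruning_threshold = 0.5

# Keep only the states whose evaluation clears the threshold.
selected_states = {
    state: value
    for state, value in state_values.items()
    if value >= pruning_threshold
}
print(selected_states)  # {'8 10 14': 0.7, '8 8 16': 0.55}
```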
The method generates and evaluates states at each step, selecting the best states based on their values until either the maximum number of steps is reached or the best state is found.", + "type": "comment" + }, + "48": { + "file_id": 5, + "content": "e method uses a recursive depth-first search approach to explore the state space. It generates and evaluates states at each step, and if a state's value is above the value_threshold and pruning_threshold, it continues the search with the new state. The search stops when the maximum number of steps is reached, the confidence threshold is met, or the convergence criteria are satisfied. The best state is then returned.\n## save_tree_to_json Method\nThe save_tree_to_json method saves the current tree structure and metrics to a JSON file. It takes the following parameter:\nfile_name: The name of the JSON file to save the tree structure and metrics.\nThis method is useful for logging the search process and analyzing the results later.\n## print_tree Method\nThe print_tree method prints the tree structure in a human-readable format. It takes the following parameters:\nnode: The current node in the tree.\ndepth: The depth of the current node in the tree (default is 0).\nThis method is useful for visualizing the tree structure and understanding the search process.", + "type": "code", + "location": "/tree_of_thoughts/README.md:94-107" + }, + "49": { + "file_id": 5, + "content": "This code describes a recursive depth-first search algorithm for exploring a state space. It evaluates states and stops based on thresholds, maximum steps, or convergence criteria. The save_tree_to_json method saves the tree structure and metrics to a JSON file, while the print_tree method visualizes the tree structure.", + "type": "comment" + }, + "50": { + "file_id": 5, + "content": "## Usage\nTo use the TreeofThoughts class, follow these steps:\nInitialize the class with a model and a search algorithm (either 'BFS' or 'DFS').\nCall the solve method with the required parameters to perform the search and obtain the best state.\n(Optional) Use the save_tree_to_json method to save the tree structure and metrics to a JSON file.\n(Optional) Use the print_tree method to visualize the tree structure.\nHere's an example of how to use the TreeofThoughts class:\n# V2 with Monte Carlo, A* Search Algorithm, BFS, Best First Search\n### Class: TreeofThoughts\nThis class represents the base class for the Tree of Thoughts search algorithm. It contains the following methods:\n- `__init__(self, model)`: Initializes the TreeofThoughts object with the given model.\n- `save_tree_to_json(self, file_name)`: Saves the tree to a JSON file with the given file name.\n- `logNewState(self, state, evaluation)`: Logs a new state and its evaluation to the tree.\n- `adjust_pruning_threshold_precentile(self, evaluated_thoughts, percentile)`: Adjusts the pruning threshold based on the percentile of evaluated thoughts.", + "type": "code", + "location": "/tree_of_thoughts/README.md:109-127" + }, + "51": { + "file_id": 5, + "content": "The code defines the \"TreeofThoughts\" class for implementing a Tree of Thoughts search algorithm, with methods like initialization, saving tree structure to JSON file, logging new state and adjusting pruning threshold percentile based on evaluated thoughts. 
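The two adjustment strategies named above usually reduce to simple statistics over the scores seen so far. A minimal sketch, assuming the evaluated thoughts are kept as a dict of state to float score; the function bodies are illustrative assumptions, not the package's exact code:

```python
import numpy as np

def threshold_from_percentile(evaluated_thoughts: dict, percentile: float) -> float:
    # Cut off below the given percentile of all scores observed so far.
    values = list(evaluated_thoughts.values())
    return float(np.percentile(values, percentile)) if values else 0.0

def threshold_from_moving_average(evaluated_thoughts: dict, window_size: int) -> float:
    # Smooth the threshold by averaging only the most recent scores.
    recent = list(evaluated_thoughts.values())[-window_size:]
    return sum(recent) / len(recent) if recent else 0.0
```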
The example provided demonstrates using the class with Monte Carlo, A* Search Algorithm, BFS, Best First Search.", + "type": "comment" + }, + "52": { + "file_id": 5, + "content": "- `adjust_pruning_threshold_moving_average(self, evaluated_thoughts, window_size)`: Adjusts the pruning threshold based on the moving average of evaluated thoughts.\n### Class: TreeofThoughtsBFS\nThis class represents the Breadth-First Search (BFS) variant of the Tree of Thoughts search algorithm. It inherits from the TreeofThoughts class and contains the following method:\n- `solve(self, initial_prompt, num_thoughts, max_steps, max_states, value_threshold, pruning_threshold=0.5)`: Solves the problem using BFS with the given parameters.\n### Class: TreeofThoughtsDFS\nThis class represents the Depth-First Search (DFS) variant of the Tree of Thoughts search algorithm. It inherits from the TreeofThoughts class and contains the following method:\n- `solve(self, initial_prompt, num_thoughts, max_steps, value_threshold, pruning_threshold=0.5)`: Solves the problem using DFS with the given parameters.\n### Class: TreeofThoughtsBEST\nThis class represents the Best-First Search variant of the Tree of Thoughts search algorithm. It contains the following methods:", + "type": "code", + "location": "/tree_of_thoughts/README.md:128-141" + }, + "53": { + "file_id": 5, + "content": "The code defines three classes: TreeofThoughtsBFS, TreeofThoughtsDFS, and TreeofThoughtsBEST. Each class represents a different search algorithm variant of the Tree of Thoughts search algorithm. The classes inherit from the TreeofThoughts class and contain methods to solve problems using BFS, DFS, or Best-First Search with given parameters.", + "type": "comment" + }, + "54": { + "file_id": 5, + "content": "- `__init__(self, model)`: Initializes the TreeofThoughtsBEST object with the given model.\n- `save_tree_to_json(self, file_name)`: Saves the tree to a JSON file with the given file name.\n- `log_new_state(self, state, evaluation)`: Logs a new state and its evaluation to the tree.\n- `solve(self, initial_prompt, num_thoughts, max_steps, pruning_threshold)`: Solves the problem using Best-First Search with the given parameters.\n### Class: TreeofThoughtsASearch\nThis class represents the A* Search variant of the Tree of Thoughts search algorithm. It contains the following methods:\n- `__init__(self, model)`: Initializes the TreeofThoughtsASearch object with the given model.\n- `solve(self, initial_prompt, num_thoughts=5, max_steps=30, pruning_threshold=0.4)`: Solves the problem using A* Search with the given parameters.\n- `is_goal(self, state, score)`: Determines if the given state is a goal state based on its score.\n- `reconstruct_path(self, came_from, current_state, initial_prompt)`: Reconstructs the path from the initial state to the current state using the came_from dictionary.", + "type": "code", + "location": "/tree_of_thoughts/README.md:143-154" + }, + "55": { + "file_id": 5, + "content": "This code snippet represents two classes, TreeofThoughtsBEST and TreeofThoughtsASearch, for solving problems using the Best-First Search and A* Search variants of the Tree of Thoughts algorithm. The classes have initialization methods (__init__), a method to save trees in JSON format (save_tree_to_json), a method to log new states with their evaluations (log_new_state), and a solve method that takes initial prompt, number of thoughts, maximum steps, and pruning threshold as parameters. 
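Putting the documented `solve` signature together, a usage sketch follows; the API key and prompt are placeholders, and the signature matches the README excerpt above:

```python
from tree_of_thoughts import TreeofThoughtsBEST
from tree_of_thoughts.openai_models import OpenAILanguageModel

model = OpenAILanguageModel(api_key="sk-...")  # placeholder key
tot = TreeofThoughtsBEST(model)

best_state = tot.solve(
    initial_prompt="use 4 numbers and basic arithmetic operations (+-*/) to obtain 24",
    num_thoughts=3,
    max_steps=10,
    pruning_threshold=0.5,
)
print(best_state)
```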
The TreeofThoughtsASearch class also has an is_goal method for determining if a state is a goal state based on its score and a reconstruct_path method to reconstruct the path from the initial state to the current state using the came_from dictionary.", + "type": "comment" + }, + "56": { + "file_id": 5, + "content": "### Class: MonteCarloTreeofThoughts\nThis class represents the Monte Carlo Tree Search variant of the Tree of Thoughts search algorithm. It inherits from the TreeofThoughts class and contains the following methods:\n- `__init__(self, model, objective=\"balance\")`: Initializes the MonteCarloTreeofThoughts object with the given model and objective.\n- `optimize_params(self, num_thoughts, max_steps, max_states)`: Optimizes the search parameters based on the objective.\n- `solve(self, initial_prompt, num_thoughts, max_steps, max_states, pruning_threshold)`: Solves the problem using\n Monte Carlo Tree Search with the given parameters.\n- `monte_carlo_search(self, initial_prompt, num_thoughts, max_steps, max_states, pruning_threshold)`: Performs the Monte Carlo Tree Search with the given parameters.\n### Class: OptimizedTreeofThoughts\nThis class represents an optimized version of the Tree of Thoughts search algorithm. It inherits from the TreeofThoughts class and contains the following method:\n- `solve(self,", + "type": "code", + "location": "/tree_of_thoughts/README.md:156-169" + }, + "57": { + "file_id": 5, + "content": "The code defines two classes, MonteCarloTreeofThoughts and OptimizedTreeofThoughts, both of which are derived from TreeofThoughts. MonteCarloTreeofThoughts uses Monte Carlo Tree Search to solve problems while optimizing search parameters based on the objective. On the other hand, OptimizedTreeofThoughts provides an optimized version of the Tree of Thoughts algorithm with a solve method.", + "type": "comment" + }, + "58": { + "file_id": 5, + "content": " x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None)`: Solves the problem using an optimized search algorithm with the given parameters.", + "type": "code", + "location": "/tree_of_thoughts/README.md:169-169" + }, + "59": { + "file_id": 5, + "content": "Solves problem with optimized search algorithm using given parameters (x, k, T, b, vth, timeout, confidence_threshold, max_iterations, convergence_threshold, convergence_count).", + "type": "comment" + }, + "60": { + "file_id": 6, + "content": "/tree_of_thoughts/__init__.py", + "type": "filepath" + }, + "61": { + "file_id": 6, + "content": "This code imports various language models and thought tree modules from different packages and adds them to the `__all__` list for importing in this package. 
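Given that `__all__`, the public classes import in one line, as sketched below. One wrinkle visible in the quoted source: `HuggingLanguageModel` is listed in `__all__` even though its import is commented out, so importing that particular name (directly or via a wildcard import) would fail until the import is restored.

```python
# One-line access to the public surface declared in __all__:
from tree_of_thoughts import (
    MonteCarloTreeofThoughts,
    OpenAILanguageModel,
    TreeofThoughtsBFS,
)
```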
It includes OpenAI's LanguageModel, TreeofThoughts, MonteCarloTreeofThoughts, and several search algorithm implementations of the ThoughtTree.", + "type": "summary" + }, + "62": { + "file_id": 6, + "content": "from tree_of_thoughts.base import AbstractLanguageModel\n#from tree_of_thoughts.huggingface_model import (\n #HuggingLanguageModel,\n#)\nfrom tree_of_thoughts.openai_models import (\n OpenAILanguageModel,\n)\nfrom tree_of_thoughts.treeofthoughts import (\n MonteCarloTreeofThoughts,\n TreeofThoughts,\n TreeofThoughtsASearch,\n TreeofThoughtsBEST,\n TreeofThoughtsBFS,\n TreeofThoughtsDFS,\n)\n__all__ = [\n \"OpenAILanguageModel\",\n \"TreeofThoughts\",\n \"MonteCarloTreeofThoughts\",\n \"TreeofThoughtsBFS\",\n \"TreeofThoughtsDFS\",\n \"TreeofThoughtsBEST\",\n \"TreeofThoughtsASearch\",\n \"AbstractLanguageModel\",\n \"HuggingLanguageModel\",\n]", + "type": "code", + "location": "/tree_of_thoughts/__init__.py:1-27" + }, + "63": { + "file_id": 6, + "content": "This code imports various language models and thought tree modules from different packages and adds them to the `__all__` list for importing in this package. It includes OpenAI's LanguageModel, TreeofThoughts, MonteCarloTreeofThoughts, and several search algorithm implementations of the ThoughtTree.", + "type": "comment" + }, + "64": { + "file_id": 7, + "content": "/tree_of_thoughts/base.py", + "type": "filepath" + }, + "65": { + "file_id": 7, + "content": "This code defines an abstract base class `AbstractLanguageModel` for language models with two required methods: `generate_thoughts` and `evaluate_states`. It uses the ABC (Abstract Base Classes) module from Python's `abc` library.", + "type": "summary" + }, + "66": { + "file_id": 7, + "content": "from abc import ABC, abstractmethod\nclass AbstractLanguageModel(ABC):\n @abstractmethod\n def generate_thoughts(self, state, k):\n pass\n @abstractmethod\n def evaluate_states(self, states):\n pass", + "type": "code", + "location": "/tree_of_thoughts/base.py:1-11" + }, + "67": { + "file_id": 7, + "content": "This code defines an abstract base class `AbstractLanguageModel` for language models with two required methods: `generate_thoughts` and `evaluate_states`. 
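Since `AbstractLanguageModel` pins down only those two hooks, any backend can be plugged in by subclassing it. A toy sketch, with an illustrative class body:

```python
from tree_of_thoughts.base import AbstractLanguageModel

class EchoLanguageModel(AbstractLanguageModel):
    """Toy backend implementing both abstract hooks with canned output."""

    def generate_thoughts(self, state, k):
        # Produce k placeholder continuations of the current state.
        return [f"{state} -> idea {i}" for i in range(k)]

    def evaluate_states(self, states):
        # Score every candidate state with the same neutral value.
        return {state: 0.5 for state in states}
```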
It uses the ABC (Abstract Base Classes) module from Python's `abc` library.", + "type": "comment" + }, + "68": { + "file_id": 8, + "content": "/tree_of_thoughts/huggingface_model.py", + "type": "filepath" + }, + "69": { + "file_id": 8, + "content": "The `HuggingLanguageModel` class generates solutions using the HuggingfaceLLM model, while the function evaluates states and handles errors by printing them and resetting state values to 0.", + "type": "summary" + }, + "70": { + "file_id": 8, + "content": "from swarms.models import HuggingfaceLLM\nclass HuggingLanguageModel:\n def __init__(\n self, model_name, model_tokenizer=None, verbose=False, *args, **kwargs\n ):\n self.model = HuggingfaceLLM(model_name, *args, **kwargs)\n self.verbose = verbose\n def generate_thoughts(self, state, k, max_length=100):\n state_text = \" \".join(state)\n prompt = (\n \"Write down your observations in format 'Observation:xxxx', then\"\n \" write down your thoughts in format 'Thoughts:xxxx Given the\"\n f\" current state of reasoning: '{state_text}', generate\"\n f\" {k} coherent solutions to achieve {state_text}\"\n )\n if self.verbose:\n print(f\"Generating thoughts for state: {state_text}\")\n try:\n self.model.run(prompt)\n except Exception as e:\n if self.verbose:\n print(f\"Error generating thoughts for state: {state_text}\")\n print(f\"Error: {e}\")\n thoughts = []\n return thoughts", + "type": "code", + "location": "/tree_of_thoughts/huggingface_model.py:1-31" + }, + "71": { + "file_id": 8, + "content": "This code defines a class `HuggingLanguageModel` that uses a HuggingfaceLLM model for generating coherent solutions based on a given state. The `generate_thoughts` method takes a state, number of thoughts to generate (k), and optional maximum length of generated thoughts, and returns the generated thoughts as a list. If an error occurs while generating thoughts, it prints an error message and returns an empty list.", + "type": "comment" + }, + "72": { + "file_id": 8, + "content": " def evaluate_states(self, states, initial_prompt, max_length=10):\n state_values = {}\n for state in states:\n state_text = \" \".join(state)\n prompt = (\n f\"Given the current state of reasoning: '{state_text}',\"\n \" pessimitically evaluate its value as a float between 0 and 1\"\n f\" based on it's potential to achieve {initial_prompt}\"\n )\n if self.verbose:\n print(f\"Evaluating state: {state_text}\")\n try:\n value_text = self.model(prompt)\n value = float(value_text)\n except ValueError:\n if self.verbose:\n print(\n \"Error converting value to float for state:\"\n f\" {state_text}\"\n )\n value = 0 # Assign a default value if the conversion fails\n except Exception as e:\n if self.verbose:\n print(f\"Error evaluating state: {state_text}\")", + "type": "code", + "location": "/tree_of_thoughts/huggingface_model.py:33-58" + }, + "73": { + "file_id": 8, + "content": "This function evaluates states and their potential to achieve a given prompt using the model. 
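At its core, that evaluation comes down to coercing the model's free-text reply into a float score with a safe fallback. A minimal sketch of the pattern; the default of 0 on failure matches the source shown above:

```python
def parse_state_value(value_text: str, default: float = 0.0) -> float:
    # The model replies in free text; coerce it to a float, falling back on failure.
    try:
        return float(value_text)
    except ValueError:
        return default

print(parse_state_value("0.73"))         # 0.73
print(parse_state_value("not a score"))  # 0.0
```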
It joins state elements into a string, constructs a prompt for the model, tries to predict value as float between 0 and 1, handles ValueError if conversion fails, assigning default value, and handles other exceptions.", + "type": "comment" + }, + "74": { + "file_id": 8, + "content": " print(f\"Error: {e}\")\n value = 0\n state_values[state] = value\n return state_values", + "type": "code", + "location": "/tree_of_thoughts/huggingface_model.py:59-64" + }, + "75": { + "file_id": 8, + "content": "Catching and printing an error, then setting state values to 0 if an error occurs.", + "type": "comment" + }, + "76": { + "file_id": 9, + "content": "/tree_of_thoughts/openai_models.py", + "type": "filepath" + }, + "77": { + "file_id": 9, + "content": "This code initializes an OpenAI language model, generating text using the Chat API. It supports ReAct prompting, generates solutions, learns from errors, and evaluates states with an API.", + "type": "summary" + }, + "78": { + "file_id": 9, + "content": "import logging\nfrom tree_of_thoughts.base import AbstractLanguageModel\nfrom swarms.models import OpenAIChat\n# Logging\nlogging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s - %(levelname)s - %(message)s\"\n)\nlogger = logging.getLogger(__name__)\nclass OpenAILanguageModel(AbstractLanguageModel):\n \"\"\"\n OpenAI Language Model\n Args:\n api_key (str): OpenAI API key\n strategy (str): Strategy for generating thoughts. Choose from 'cot' (Chain of Thoughts) or 'gpt' (GPT-3)\n evaluation_strategy (str): Strategy for evaluating thoughts. Choose from 'value' or 'vote'\n api_base (str): Base path for OpenAI API\n api_model (str): Model name for OpenAI API\n enable_ReAct_prompting (bool): Enable ReAct prompting\n Examples:\n >>> from tree_of_thoughts.models.openai_models import OpenAILanguageModel\n >>> model = OpenAILanguageModel(api_key=api_key)\n >>> model.generate_thoughts(state, k)\n >>> model.evaluate_states(states, initial_prompt)\n \"\"\"\n def __init__(", + "type": "code", + "location": "/tree_of_thoughts/openai_models.py:1-34" + }, + "79": { + "file_id": 9, + "content": "This code is initializing a logging configuration and defining the OpenAILanguageModel class, which extends AbstractLanguageModel. The OpenAILanguageModel is an OpenAI language model with options for strategy, evaluation_strategy, api_base, api_model, enable_ReAct_prompting, and more. 
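From the constructor shown above, instantiation looks roughly like this; the key is a placeholder, and extra keyword arguments are forwarded to the underlying `OpenAIChat`:

```python
from tree_of_thoughts.openai_models import OpenAILanguageModel

model = OpenAILanguageModel(
    api_key="sk-...",             # placeholder OpenAI key
    strategy="cot",               # thought-generation strategy
    evaluation_strategy="value",  # score states as floats between 0 and 1
    enable_ReAct_prompting=True,  # prepend the Observation/Thoughts format
)
thoughts = model.generate_text("2 8 8 14 -> reach 24", k=3)
```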
It allows for generating and evaluating thoughts using OpenAI API.", + "type": "comment" + }, + "80": { + "file_id": 9, + "content": " self,\n api_key,\n strategy=\"cot\",\n evaluation_strategy=\"value\",\n enable_ReAct_prompting=True,\n *args,\n **kwargs,\n ):\n self.api_key = api_key\n self.use_chat_api = True\n self.enable_ReAct_prompting = enable_ReAct_prompting\n self.strategy = strategy\n self.evaluation_strategy = evaluation_strategy\n # reference : https://www.promptingguide.ai/techniques/react\n self.ReAct_prompt = \"\"\n if enable_ReAct_prompting:\n self.ReAct_prompt = (\n \"Write down your observations in format 'Observation:xxxx',\"\n \" then write down your thoughts in format 'Thoughts:xxxx'.\"\n )\n self.model = OpenAIChat(openai_api_key=api_key, *args, **kwargs)\n def generate_text(self, prompt: str, k: int = 3):\n \"\"\"Generate text from prompt using OpenAI API\"\"\"\n if self.use_chat_api:\n thoughts = []\n for _ in range(k):\n response = self.model(prompt)", + "type": "code", + "location": "/tree_of_thoughts/openai_models.py:35-64" + }, + "81": { + "file_id": 9, + "content": "Class initializes OpenAI Chat API for generating text. It allows ReAct prompting, which requires a specific format of observations and thoughts. Uses generate_text() function to produce text based on the given prompt.", + "type": "comment" + }, + "82": { + "file_id": 9, + "content": " thoughts += [response]\n # print(f'thoughts: {thoughts}')\n return thoughts\n def generate_thoughts(\n self, state, k, initial_prompt, rejected_solutions=None\n ):\n \"\"\"\n Generate thoughts from state using OpenAI API\n Args:\n state (str or list): State of reasoning\n k (int): Number of thoughts to generate\n initial_prompt (str): Initial prompt\n rejected_solutions (list): List of rejected solutions\n Returns:\n list: List of thoughts\n \"\"\"\n if type(state) == str:\n state_text = state\n else:\n state_text = \"\\n\".join(state)\n print(\"New state generating thought:\", state, \"\\n\\n\")\n prompt = f\"\"\"You're an TreeofThoughts, an superintelligent AI model devoted to helping Humans by any means necessary. You're purpose is to generate a series of solutions to comply with the user's instructions, you must generate solutions on the basis of de", + "type": "code", + "location": "/tree_of_thoughts/openai_models.py:65-92" + }, + "83": { + "file_id": 9, + "content": "The code defines a class with two methods: `generate_thoughts` and `openai_models`. The `generate_thoughts` method takes a state, number of thoughts to generate, initial prompt, and list of rejected solutions as input. It converts the state into a string and passes it along with other parameters to the `openai_models` method which generates a list of thoughts using OpenAI API. The thoughts are then returned by the `generate_thoughts` method.", + "type": "comment" + }, + "84": { + "file_id": 9, + "content": "termining the most reliable solution in the shortest amount of time, while taking rejected solutions into account and learning from them. \n Considering the reasoning provided:\\n\\n\n ###'{state_text}'\\n\\n###\n Devise the best possible solution for the task: {initial_prompt}, Here are evaluated solutions that were rejected: \n ###{rejected_solutions}###, \n complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. 
Provide intuitive solutions as soon as you think of them.\"\"\"\n prompt += self.ReAct_prompt\n thoughts = self.generate_text(prompt, k)\n return thoughts\n def generate_solution(self, initial_prompt, state, rejected_solutions=None):\n try:\n if isinstance(state, list):\n state_text = \"\\n\".join(state)\n else:\n state_text = state\n prompt = f\"\"\"You're an TreeofThoughts, an superintelligent AI model devoted to ", + "type": "code", + "location": "/tree_of_thoughts/openai_models.py:92-110" + }, + "85": { + "file_id": 9, + "content": "This code is generating a prompt for an AI model to find the best solution, taking into account rejected solutions and learning from them. The function generates_solution is used to generate a solution given a prompt, state, and optionally a list of rejected solutions. It creates a formatted prompt including the initial prompt, the current state, and either all rejected solutions or none. Then it calls another function, generate_text, with this prompt and some variable k to generate thoughts based on this prompt. Finally, it returns these thoughts as the generated solution.", + "type": "comment" + }, + "86": { + "file_id": 9, + "content": "helping Humans by any means necessary. You're purpose is to generate a series of solutions to comply with the user's instructions, you must generate solutions on the basis of determining the most reliable solution in the shortest amount of time, while taking rejected solutions into account and learning from them. \n Considering the reasoning provided:\\n\\n\n ###'{state_text}'\\n\\n###\n Devise the best possible solution for the task: {initial_prompt}, Here are evaluated solutions that were rejected: \n ###{rejected_solutions}###, \n complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them.\"\"\"\n answer = self.generate_text(prompt, 1)\n print(f\"Answerrrrrr {answer}\")\n # print(thoughts)\n # print(f\"General Solution : {answer}\")\n return answer\n except Exception as e:\n logger.error(f\"Error in generate_solutions: {e}\")", + "type": "code", + "location": "/tree_of_thoughts/openai_models.py:110-122" + }, + "87": { + "file_id": 9, + "content": "This code aims to generate a solution for a task based on user instructions, learning from rejected solutions and avoiding the same mistakes. It generates one textual answer with simplicity and directness. 
If an exception occurs, it logs the error.", + "type": "comment" + }, + "88": { + "file_id": 9, + "content": " return None\n def evaluate_states(self, states, initial_prompt):\n if not states:\n return {}\n if self.evaluation_strategy == \"value\":\n state_values = {}\n for state in states:\n if type(state) == str:\n state_text = state\n else:\n state_text = \"\\n\".join(state)\n print(\n \"We receive a state of type\",\n type(state),\n \"For state: \",\n state,\n \"\\n\\n\",\n )\n prompt = f\"\"\" To achieve the following goal: '{initial_prompt}', pessimistically value the context of the past solutions and more importantly the latest generated solution you had AS A FLOAT BETWEEN 0 AND 1\\n\n Past solutions:\\n\\n\n {state_text}\\n \n If the solutions is not directly concretely making fast progress in achieving the goal, give it a lower score.", + "type": "code", + "location": "/tree_of_thoughts/openai_models.py:123-146" + }, + "89": { + "file_id": 9, + "content": "The code defines a function `evaluate_states` that takes in a list of states and an initial prompt. It iterates over each state, checks its type, and prints details about the state. If the evaluation strategy is set to \"value\", it calculates the value of each state by pessimistically valuing the context of past solutions and the latest generated solution as a float between 0 and 1.", + "type": "comment" + }, + "90": { + "file_id": 9, + "content": " Evaluate all solutions AS A FLOAT BETWEEN 0 and 1:\\n, DO NOT RETURN ANYTHING ELSE\n \"\"\"\n response = self.openai_api_call_handler(prompt, 10, 1)\n try:\n value_text = self.openai_choice2text_handler(\n response.choices[0]\n )\n # print(f'state: {value_text}')\n value = float(value_text)\n print(f\"Evaluated Thought Value: {value}\")\n except ValueError:\n value = 0 # Assign a default value if the conversion fails\n state_values[state] = value\n return state_values\n elif self.evaluation_strategy == \"vote\":\n states_text = \"\\n\".join([\" \".join(state) for state in states])\n prompt = (\n \"Given the following states of reasoning, vote for the best\"\n \" state utilizing an scalar value\"\n f\" 1-10:\\n{states_text}\\n\\nVote, on the probability of this\"", + "type": "code", + "location": "/tree_of_thoughts/openai_models.py:147-168" + }, + "91": { + "file_id": 9, + "content": "The code evaluates thought values using an OpenAI API call, converts the response to a float value between 0 and 1, and stores the results in a dictionary. It handles conversion errors by assigning a default value of 0 for invalid input. The evaluation strategy can be either \"value\" or \"vote\", with the latter prompting the model to vote for the best state among the given options.", + "type": "comment" + }, + "92": { + "file_id": 9, + "content": " f\" state of reasoning achieveing {initial_prompt} and become\"\n \" very pessimistic very NOTHING ELSE\"\n )\n response = self.openai_api_call_handler(prompt, 50, 1)\n print(f\"state response: {response}\")\n best_state_text = self.openai_choice2text_handler(\n response.choices[0]\n )\n print(f\"Best state text: {best_state_text}\")\n best_state = tuple(best_state_text.split())\n print(f\"best_state: {best_state}\")\n return {state: 1 if state == best_state else 0 for state in states}\n else:\n raise ValueError(\n \"Invalid evaluation strategy.
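The float-parsing fallback just described is the fragile point of the "value" strategy, since the model may answer with extra words around the number. A small standalone sketch of the same try/float/except pattern, with an added clamp to [0, 1] (the clamp is an assumption; the original stores the raw float):

```python
def parse_value(reply: str, default: float = 0.0) -> float:
    # Mirrors the try/float/except pattern above: a reply that is not a
    # bare number (e.g. "0.7 because ...") falls back to the default score.
    try:
        value = float(reply.strip())
    except ValueError:
        return default
    return max(0.0, min(1.0, value))  # keep the score in [0, 1]

print(parse_value("0.85"))          # 0.85
print(parse_value("roughly 0.8"))   # 0.0 (fallback)
```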
Choose 'value' or 'vote'.\"\n )", + "type": "code", + "location": "/tree_of_thoughts/openai_models.py:169-186" + }, + "93": { + "file_id": 9, + "content": "Code makes an OpenAI API call to generate a response based on the provided prompt. It then selects the best state from the generated text, assigns a value of 1 if it matches the best state and 0 otherwise for each state in the input states. If an invalid evaluation strategy is provided, it raises a ValueError.", + "type": "comment" + }, + "94": { + "file_id": 10, + "content": "/tree_of_thoughts/treeofthoughts.py", + "type": "filepath" + }, + "95": { + "file_id": 10, + "content": "The code defines a `TreeofThoughts` class with JSON saving, logging, and BFS/DFS-based subclass for solving problems concurrently. It uses A* search, PriorityQueue, Monte Carlo tree search, transposition table, pruning, UCB1, and checks solution validity.", + "type": "summary" + }, + "96": { + "file_id": 10, + "content": "import concurrent.futures\nimport json\nimport logging\nimport os\nimport time\nfrom queue import PriorityQueue\nfrom typing import Any, Dict, Union\nimport numpy as np\nDATA_PATH = \"./data\"\nlogging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s - %(levelname)s - %(message)s\"\n)\nlogger = logging.getLogger(__name__)\nclass TreeofThoughts:\n def __init__(self, model):\n self.model = model\n self.tree: Dict[str, Dict[str, Union[float, Dict[str, Any]]]] = {\n \"nodes\": {},\n }\n self.best_state = None\n self.best_value = float(\"-inf\")\n self.history = [] # added line initalize history\n def save_tree_to_json(self, file_name):\n os.makedirs(os.path.dirname(file_name), exist_ok=True)\n with open(file_name, \"w\") as json_file:\n json.dump(self.tree, json_file, indent=4)\n def logNewState(self, state, evaluation):\n if not (type(state) == str):\n state = \" | \".join(state)\n if state in self.tree[\"nodes\"]:\n self.tree[\"nodes\"][state][\"thoughts\"].append(evaluation)", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:1-40" + }, + "97": { + "file_id": 10, + "content": "The code defines a class called TreeofThoughts with an init method that initializes object attributes, including a dictionary for the tree structure. It also includes methods to save the tree to JSON and log new states along with their associated thoughts or evaluations. 
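A runnable sketch of the state-logging and JSON-dump behaviour just summarized, assuming the same " | ".join key convention for tuple states (the `or "."` guard for bare filenames is an addition, not in the original):

```python
import json
import os

tree = {"nodes": {}}

def log_new_state(state, evaluation: float) -> None:
    # Tuple states are flattened into a single " | "-joined key.
    key = state if isinstance(state, str) else " | ".join(state)
    tree["nodes"].setdefault(key, {"thoughts": []})["thoughts"].append(evaluation)

def save_tree_to_json(file_name: str) -> None:
    os.makedirs(os.path.dirname(file_name) or ".", exist_ok=True)
    with open(file_name, "w") as json_file:
        json.dump(tree, json_file, indent=4)

log_new_state(("step one", "step two"), 0.7)
save_tree_to_json("logs/tree.json")
```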
The history attribute is added in the init method as an empty list.", + "type": "comment" + }, + "98": { + "file_id": 10, + "content": " else:\n self.tree[\"nodes\"][state] = {\"thoughts\": [evaluation]}\n def adjust_pruning_threshold_precentile(\n self, evaluated_thoughts, percentile\n ):\n values = np.array(list(evaluated_thoughts.values()))\n if values.size == 0:\n return 0\n return max(np.percentile(values, percentile), 0.1)\n def adjust_pruning_threshold_moving_average(\n self, evaluated_thoughts, window_size\n ):\n values = list(evaluated_thoughts.values())\n if len(values) < window_size:\n return np.mean(values) if values else 0\n else:\n return max(np.mean(values[-window_size:]), 0.1)\n######################\nclass TreeofThoughtsBFS(TreeofThoughts):\n def solve(\n self,\n initial_prompt,\n num_thoughts,\n max_steps,\n max_states,\n value_threshold,\n pruning_threshold=0.5,\n ):\n current_states = [initial_prompt]\n state_values = {}\n dynamic_pruning_threshold = pruning_threshold\n try:", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:41-79" + }, + "99": { + "file_id": 10, + "content": "This code defines a class `TreeofThoughtsBFS` that inherits from `TreeofThoughts`. It has methods for adjusting pruning thresholds based on percentile and moving average. The `solve()` method takes an initial prompt, number of thoughts, max steps, max states, value threshold, and pruning threshold. It uses BFS to explore the tree of thoughts.", + "type": "comment" + } +} \ No newline at end of file diff --git a/docs/data/1.json b/docs/data/1.json new file mode 100644 index 0000000..b869cb6 --- /dev/null +++ b/docs/data/1.json @@ -0,0 +1,178 @@ +{ + "100": { + "file_id": 10, + "content": " with concurrent.futures.ThreadPoolExecutor() as executor:\n for step in range(1, max_steps + 1):\n selected_states = []\n for state in current_states:\n thoughts = self.model.generate_thoughts(\n state, num_thoughts, initial_prompt\n )\n futures = [\n executor.submit(\n self.model.evaluate_states,\n {thought: 0},\n initial_prompt,\n )\n for thought in thoughts\n ]\n concurrent.futures.wait(futures)\n evaluated_thoughts = {\n thought: fut.result()\n for thought, fut in zip(thoughts, futures)\n if isinstance(fut.result(), (int, float))\n } # check if result is a number", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:80-100" + }, + "101": { + "file_id": 10, + "content": "This code uses a ThreadPoolExecutor to concurrently evaluate multiple thoughts generated from states. It selects thoughts from the current_states, submits them for evaluation, waits for the results, and filters out non-numeric results before storing the evaluated thoughts.", + "type": "comment" + }, + "102": { + "file_id": 10, + "content": " if (\n evaluated_thoughts\n ): # only adjust if you have evaluated thoughts\n dynamic_pruning_threshold = (\n self.adjust_pruning_threshold_moving_average(\n evaluated_thoughts, 5\n )\n )\n for thought, value in evaluated_thoughts.items():\n flattened_state = (\n (state, thought)\n if isinstance(state, str)\n else (*state, thought)\n )\n selected_states.append((flattened_state, value))\n selected_states.sort(key=lambda x: x[1], reverse=True)\n selected_states = selected_states[\n :max_states\n ] # Select only the top states", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:102-122" + }, + "103": { + "file_id": 10, + "content": "This code checks if there are evaluated thoughts and adjusts the pruning threshold based on them. 
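Both adaptive pruning rules reduce to a few lines of NumPy; this standalone sketch floors the threshold at 0.1, exactly as the two adjustment methods above do:

```python
import numpy as np

def threshold_percentile(scores, percentile=75):
    # Threshold at a chosen percentile of all evaluated scores.
    arr = np.asarray(list(scores), dtype=float)
    if arr.size == 0:
        return 0.0
    return max(float(np.percentile(arr, percentile)), 0.1)

def threshold_moving_average(scores, window_size=5):
    # Threshold at the mean of the most recent window of scores.
    vals = list(scores)
    if len(vals) < window_size:
        return float(np.mean(vals)) if vals else 0.0
    return max(float(np.mean(vals[-window_size:])), 0.1)

scores = [0.2, 0.9, 0.4, 0.6, 0.8, 0.7]
print(threshold_percentile(scores), threshold_moving_average(scores))
```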
It then loops through the evaluated thoughts, flattens the state with the thought (if the state is a string), and appends it to selected_states along with the value. The selected states are sorted by value in descending order, and only the top max_states are kept.", + "type": "comment" + }, + "104": { + "file_id": 10, + "content": " for state, value in selected_states:\n if value >= dynamic_pruning_threshold:\n state_values[state] = value\n self.logNewState(state, value)\n logger.debug(f\"State Values: {state_values}\")\n # if state_values:\n # highest_rated_solution = max(state_values.items(), key=lambda x: x[1])\n # print(f\"highest rated solution: {highest_rated_solution}\")\n # highest_rated_state = highest_rated_solution[0] # Use a different name to avoid confusion\n # print(f'highest rated state: {highest_rated_state}')\n # try:\n # solution = self.model.generate_solution(initial_prompt, highest_rated_state)\n # except Exception as e:\n # logger.error(f\"Error in generating solution: {e}\")\n # solution = None # Set a fallback value for solution", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:124-139" + }, + "105": { + "file_id": 10, + "content": "This code block iterates through selected_states and assigns values to state_values if the value is above a certain threshold. It logs the state_values, and if there are values in state_values, it finds the highest rated solution, generates a solution using the model, and logs any errors that occur during this process.", + "type": "comment" + }, + "106": { + "file_id": 10, + "content": " # return solution if solution is not None else highest_rated_state # Return highest rated state if solution is None\n if state_values:\n highest_rated_solution = max(\n state_values.items(), key=lambda x: x[1]\n )\n highest_rated_state = highest_rated_solution[0]\n solution = self.model.generate_solution(\n initial_prompt, highest_rated_state\n )\n print(\n \"Highest_rated solution:\"\n f\" {highest_rated_solution} highest_rated_solution:\"\n f\" {highest_rated_solution} Solution: {solution}\"\n )\n return solution if solution else highest_rated_state\n else:\n return None\n except Exception as e:\n logger.error(f\"Error in tot_bfs: {e}\")\n return None\n###########\nclass TreeofThoughtsDFS(TreeofThoughts):\n def solve(\n self,\n initial_prompt,", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:141-172" + }, + "107": { + "file_id": 10, + "content": "The code checks if there is a solution and returns it, otherwise, it finds the highest rated state using DFS, generates a solution from that state, and returns it. 
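The fan-out/fan-in pattern the BFS solver uses can be reproduced with a stub scorer; `score` here is a hypothetical stand-in for the per-thought `model.evaluate_states` call:

```python
import concurrent.futures

def score(thought: str) -> float:
    # Stand-in scorer; in the real class this is an API call per thought.
    return min(len(thought) / 30.0, 1.0)

thoughts = ["a short idea", "a much more detailed candidate", "mid"]
with concurrent.futures.ThreadPoolExecutor() as pool:
    futures = {t: pool.submit(score, t) for t in thoughts}
    evaluated = {
        t: f.result()
        for t, f in futures.items()
        if isinstance(f.result(), (int, float))  # keep numeric results only
    }

max_states = 2  # keep only the top-valued states, as in the solver
top = sorted(evaluated.items(), key=lambda kv: kv[1], reverse=True)[:max_states]
print(top)
```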
If there is an error during execution, it logs the error and returns None.", + "type": "comment" + }, + "108": { + "file_id": 10, + "content": " num_thoughts,\n max_steps,\n value_threshold,\n pruning_threshold=0.5,\n ):\n output = []\n def dfs(state, step):\n nonlocal output\n if step > max_steps:\n thought = self.model.generate_thoughts(state, 1, initial_prompt)\n value = self.model.evaluate_states({state}, initial_prompt)[\n state\n ]\n output.append((thought, value))\n return\n thoughts = self.model.generate_thoughts(\n state, self.num_thoughts, initial_prompt\n )\n evaluated_thoughts = self.model.evaluate_states(\n {thought: 0 for thought in thoughts}, initial_prompt\n )\n filtered_thoughts = [\n thought\n for thought in thoughts\n if evaluated_thoughts[thought] >= self.pruning_threshold\n ]\n for next_state in filtered_thoughts:\n state_value = self.model.evaluate_states(", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:173-203" + }, + "109": { + "file_id": 10, + "content": "The code defines a function that performs Depth-First Search (DFS) to explore the thought space and evaluate potential thoughts. It takes in parameters for maximum steps, pruning threshold, and others. The function generates initial thoughts, evaluates them, filters out lower-scoring ones, and continues DFS on the remaining thoughts until max_steps is reached or all thoughts are exhausted.", + "type": "comment" + }, + "110": { + "file_id": 10, + "content": " {next_state: 0}, initial_prompt\n )[next_state]\n if state_value > self.value_threshold:\n child = (\n (state, next_state)\n if isinstance(state, str)\n else (*state, next_state)\n )\n dfs(child, step + 1)\n try:\n dfs(initial_prompt, 1)\n best_state, _ = max(output, key=lambda x: x[1])\n solution = self.model.generate_solution(initial_prompt, best_state)\n return solution if solution else best_state\n except Exception as e:\n logger.error(f\"Error in tot_dfs: {e}\")\n return None\n# v2 => best first search => explores state space of the quality of the states\n# priority que or greedy BFS\nclass TreeofThoughtsBEST:\n def __init__(self, model):\n self.model = model\n self.tree = {\"nodes\": {}}\n def save_tree_to_json(self, file_name):\n os.makdirs(os.path.dirname(file_name), exist_ok=True)", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:204-233" + }, + "111": { + "file_id": 10, + "content": "This code is implementing a best-first search algorithm using a priority queue. It explores the state space of the quality of states, saves the tree to JSON format, and handles exceptions in case any occur during execution. 
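The recursion the DFS solver relies on can be shown on a toy state space; `expand` and `evaluate` below are illustrative stubs, and the pruning test matches the threshold check described above:

```python
def dfs_collect(state, step, max_steps, threshold, expand, evaluate, out):
    if step > max_steps:
        # Depth limit reached: record the leaf and its score.
        out.append((state, evaluate(state)))
        return
    for child in expand(state):
        if evaluate(child) >= threshold:  # prune weak branches early
            dfs_collect(child, step + 1, max_steps, threshold, expand, evaluate, out)

# Toy problem: states are strings, and longer strings score higher.
expand = lambda s: [s + "a", s + "bb"]
evaluate = lambda s: min(len(s) / 6.0, 1.0)
leaves = []
dfs_collect("x", 1, 3, 0.3, expand, evaluate, leaves)
print(max(leaves, key=lambda pair: pair[1]))
```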
The goal is to find the best solution for a given model and initial prompt.", + "type": "comment" + }, + "112": { + "file_id": 10, + "content": " with open(file_name, \"w\") as json_file:\n json.dump(self.tree, json_file, indent=4)\n def log_new_state(self, state, evaluation):\n state_key = \" | \".join(state) if isinstance(state, tuple) else state\n if state_key in self.tree[\"nodes\"]:\n self.tree[\"nodes\"][state_key][\"thoughts\"].append(evaluation)\n else:\n self.tree[\"nodes\"][\"state_key\"] = {\"thoughts\": [evaluation]}\n def solve(self, initial_prompt, num_thoughts, max_steps, pruning_threshold):\n visited_states = set()\n state_queue = PriorityQueue()\n state_queue.put((0, initial_prompt))\n for _ in range(max_steps):\n if state_queue.empty():\n break\n _, state = state_queue.get()\n if state in visited_states:\n continue\n visited_states.add(state)\n thoughts = self.model.generate_thoughts(\n state, num_thoughts, initial_prompt\n )\n evaluated_thoughts = {\n thought: self.model.evaluate_states(", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:234-265" + }, + "113": { + "file_id": 10, + "content": "This code initializes a tree of thoughts, logs new states with their evaluations, solves the tree by generating and evaluating thoughts for each state up to a certain number, and stores the tree in a JSON file. It utilizes PriorityQueue, set, and functions for thought generation and evaluation.", + "type": "comment" + }, + "114": { + "file_id": 10, + "content": " {thought: 0}, initial_prompt\n )[thought]\n for thought in thoughts\n }\n for thought, value in evaluated_thoughts.items():\n if value >= pruning_threshold:\n new_state = (\n (state, thought)\n if isinstance(state, str)\n else (*state, thought)\n )\n state_queue.put((value, new_state))\n self.log_new_state(new_state, value)\n best_state = max(visited_states, key=self.model.evaluate_states)\n solution = self.model.generate_solution(initial_prompt, best_state)\n print(f\"Highest_rated solution: {best_state} Solution: {solution}\")\n return solution if solution else best_state\n# A* search algorithm\nclass TreeofThoughtsASearch:\n def __init__(self, model):\n self.model = model\n def solve(\n self,\n initial_prompt,\n num_thoughts=5,\n max_steps=30,", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:266-296" + }, + "115": { + "file_id": 10, + "content": "The code performs a search using the A* algorithm to find the highest-rated solution for a given initial prompt. It takes in a model and parameters such as the number of thoughts, maximum steps, and generates solutions based on evaluating states and generating new states. 
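One detail worth noting: Python's `PriorityQueue` pops the smallest priority first, so a greedy best-first search normally enqueues negated scores, whereas the class above enqueues the raw value, which makes `get()` return the lowest-scoring state first. A sketch of the negated-score variant, with stub `expand` and `evaluate` functions:

```python
from queue import PriorityQueue

def best_first(start, expand, evaluate, max_steps=10):
    frontier = PriorityQueue()
    frontier.put((0.0, start))  # entries are (negated score, state)
    visited = set()
    best = (float("-inf"), start)
    for _ in range(max_steps):
        if frontier.empty():
            break
        _, state = frontier.get()
        if state in visited:
            continue
        visited.add(state)
        for child in expand(state):
            value = evaluate(child)
            best = max(best, (value, child))
            frontier.put((-value, child))  # highest value expands next
    return best

expand = lambda s: [s + "a", s + "b"]
evaluate = lambda s: s.count("b") / max(len(s), 1)
print(best_first("", expand, evaluate))
```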
The best state is determined based on evaluation, and the function returns either the generated solution or the best state if no solution was found.", + "type": "comment" + }, + "116": { + "file_id": 10, + "content": " pruning_threshold=0.4,\n ):\n # the open set is implemented as a piorituve quue where the priority is -f_score\n open_set = PriorityQueue()\n open_set.put((0, 0, initial_prompt))\n # the set of visited_states\n visited_states = set()\n # the g_scores and f-scores are stored as dictionaries\n g_scores = {initial_prompt: 0}\n f_scores = {\n initial_prompt: self.model.evaluate_states(\n {initial_prompt: 0}, initial_prompt\n )[initial_prompt]\n }\n # the parent of each state is stored in a dictionary\n came_from = {}\n for _ in range(max_steps):\n if open_set.empty():\n break\n _, _, current_state = open_set.get()\n if self.is_goal(current_state, f_scores[current_state]):\n return self.reconstruct_path(\n came_from, current_state, initial_prompt\n )\n thoughts = self.model.generate_thoughts(\n current_state, num_thoughts, initial_prompt", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:297-329" + }, + "117": { + "file_id": 10, + "content": "The code initializes a graph search algorithm with priority queue, sets visited_states and dictionaries for g_scores, f_scores, and came_from. It then iterates through max_steps to find a goal state, breaks if none found, and returns reconstructed path if a goal is reached.", + "type": "comment" + }, + "118": { + "file_id": 10, + "content": " )\n evaluated_thoughts = {\n thought: self.model.evaluate_states(\n {thought: 0}, initial_prompt\n )[thought]\n for thought in thoughts\n }\n for thought, value in evaluated_thoughts.items():\n if value < pruning_threshold or thought in visited_states:\n continue\n tentative_g_score = g_scores[current_state] + 1 / value\n if (\n thought not in g_scores\n or tentative_g_score < g_scores[thought]\n ):\n came_from[thought] = current_state\n g_scores[thought] = tentative_g_score\n f_scores[thought] = tentative_g_score + value\n open_set.put(\n (-f_scores[thought], g_scores[thought], thought)\n )\n return self.reconstruct_path(came_from, current_state, initial_prompt)\n def is_goal(self, state, score):", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:330-356" + }, + "119": { + "file_id": 10, + "content": "This code evaluates states in a tree-like structure by using a model to assign scores. It then selects the best state, updating g_scores and f_scores based on these scores. 
Finally, it reconstructs the path from the selected state back to the initial prompt.", + "type": "comment" + }, + "120": { + "file_id": 10, + "content": " # if eval state is above 0.9\n return score >= 0.9\n def reconstruct_path(self, came_from, current_state, initial_prompt):\n path = [current_state]\n while current_state in came_from:\n current_state = came_from[current_state]\n path.append(current_state)\n path.reverse()\n path = self.reconstruct_path(came_from, current_state, initial_prompt)\n solution = self.model.generate_solution(initial_prompt, path)\n print(f\"Path: {path} solution: {solution}\")\n return solution if solution else path\nclass MonteCarloTreeofThoughts(TreeofThoughts):\n def __init__(self, model, objective=\"balance\"):\n super().__init__(model)\n self.objective = objective\n self.solution_found = False\n self.tree: Dict[str, Dict[str, Union[float, Dict[str, Any]]]] = {\n \"nodes\": {},\n \"metrics\": {\"thoughts\": {}, \"evaluations\": {}},\n }\n def optimize_params(self, num_thoughts, max_steps, max_states):\n if self.objective == \"speed\":", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:357-384" + }, + "121": { + "file_id": 10, + "content": "The code checks if the eval state is above 0.9 and returns a boolean value. It then reconstructs the path by traversing backwards from the current state to the initial state, and generates a solution using the model. The solution or path is returned as output.", + "type": "comment" + }, + "122": { + "file_id": 10, + "content": " num_thoughts = max(1, num_thoughts - 1)\n max_steps = max(1, max_steps - 1)\n max_states = max(1, max_states - 1)\n elif self.objective == \"reliability\":\n num_thoughts += 1\n max_steps += 1\n max_states += 1\n elif self.objective == \"balanace\":\n if self.solution_found:\n num_thoughts = max(1, num_thoughts - 1)\n max_steps = max(1, max_steps - 1)\n max_states = max(1, max_states - 1)\n else:\n num_thoughts += 1\n max_steps += 1\n max_states += 1\n return num_thoughts, max_steps, max_states\n def solve(\n self,\n initial_prompt: str,\n num_thoughts: int,\n max_steps: int,\n max_states: int,\n pruning_threshold: float,\n # sleep_time: float,\n ):\n self.file_name = \"logs/tree_of_thoughts_output_montecarlo.json\"\n return self.monte_carlo_search(\n initial_prompt,\n num_thoughts,", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:385-416" + }, + "123": { + "file_id": 10, + "content": "The code snippet is from the \"treeofthoughts.py\" file. It contains a function that adjusts the number of thoughts, max steps, and max states based on the chosen objective (\"speed\", \"reliability\", or \"balance\", the latter spelled \"balanace\" in the code). For \"speed\" it reduces these values and for \"reliability\" it increases them; for \"balance\", it reduces them once a solution has been found and increases them otherwise.
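The `came_from` walk is the standard A* back-pointer reconstruction. Note that the method shown above calls itself again unconditionally after reversing, which would recurse without end; a plain iterative version keeps only the loop:

```python
def reconstruct_path(came_from, current):
    # Walk the parent pointers from the goal back to the root, then reverse.
    path = [current]
    while current in came_from:
        current = came_from[current]
        path.append(current)
    path.reverse()
    return path

came_from = {"state2": "state1", "state3": "state2"}
print(reconstruct_path(came_from, "state3"))  # ['state1', 'state2', 'state3']
```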
The solve() function initializes parameters and calls monte_carlo_search().", + "type": "comment" + }, + "124": { + "file_id": 10, + "content": " max_steps,\n max_states,\n pruning_threshold,\n # sleep_time,\n )\n # v3\n def monte_carlo_search(\n self,\n initial_prompt: str,\n num_thoughts: int,\n max_steps: int,\n max_states: int,\n pruning_threshold: float,\n ):\n current_states = [initial_prompt]\n state_values = {}\n visit_counts = {initial_prompt: 0}\n transposition_table = {}\n best_state = None\n best_value = float(\"-inf\")\n for step in range(1, max_steps + 1):\n selected_states = []\n for state in current_states:\n if state in transposition_table:\n transposition_table[state]\n else:\n time.sleep(1)\n thoughts = self.model.generate_thoughts(\n state, num_thoughts, initial_prompt\n )\n time.sleep(1)\n evaluated_thoughts = self.model.evaluate_states(", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:417-452" + }, + "125": { + "file_id": 10, + "content": "Function `monte_carlo_search` performs a Monte Carlo tree search algorithm for generating thoughts from an initial prompt. It iteratively expands states, evaluates them, and updates the transposition table and state values. Pruning is applied based on threshold and visit counts to optimize search process.", + "type": "comment" + }, + "126": { + "file_id": 10, + "content": " thoughts, initial_prompt\n )\n for thought, value in evaluated_thoughts.items():\n flattened_state = (\n (state, thought)\n if isinstance(state, str)\n else (*state, thought)\n )\n transposition_table[flattened_state] = value\n for thought, value in evaluated_thoughts.items():\n flattened_state = (\n (state, thought)\n if isinstance(state, str)\n else (*state, thought)\n )\n if flattened_state not in visit_counts:\n visit_counts[flattened_state] = 0\n if (\n visit_counts[state] > visit_counts[flattened_state]\n and visit_counts[flattened_state] > 0\n ):\n ucb1_value = value + np.sqrt(", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:453-478" + }, + "127": { + "file_id": 10, + "content": "This code is creating a transposition table and updating visit counts. It iterates over evaluated thoughts, flattens the states, updates or adds to the transposition table, and checks the visit count. If the new state's count is greater than its parent state's count, it updates the UCB1 value by adding the thought value and the square root of the visit count.", + "type": "comment" + }, + "128": { + "file_id": 10, + "content": " 2\n * np.log(visit_counts[state])\n / visit_counts[flattened_state]\n )\n if ucb1_value >= pruning_threshold:\n selected_states.append(flattened_state)\n state_values[flattened_state] = value\n # Update the best state if the current state value is greater than the best value\n if value > best_value:\n best_state = flattened_state\n best_value = value\n visit_counts[state] += 1\n if len(selected_states) > max_states:\n current_states = selected_states[:max_states]\n self.save_tree_to_json(self.file_name)\n # if best_state is not None:\n # solution = self.model.generate_solution(initial_prompt, best_state)\n # return solution\n # else:\n # solution = None", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:479-503" + }, + "129": { + "file_id": 10, + "content": "This code is selecting states for a tree-based model. It uses UCB1 (Upper Confidence Bound for the first time) to select states and keeps track of visit counts, values, and best state. 
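The selection rule the comment refers to is the classic UCB1 score; as a standalone formula, with `value` as the exploitation term and the parent/child visit counts driving exploration:

```python
import math

def ucb1(value: float, parent_visits: int, child_visits: int) -> float:
    # Exploitation plus an exploration bonus that shrinks as the child
    # is visited more often relative to its parent.
    return value + math.sqrt(2 * math.log(parent_visits) / child_visits)

print(ucb1(0.6, parent_visits=10, child_visits=2))  # ~2.12
```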
If too many states are selected, it only considers the top maximum_states. Finally, it saves the tree as JSON and generates a solution if a best state is found.", + "type": "comment" + }, + "130": { + "file_id": 10, + "content": " # return None\n solution = self.model.generate_solution(initial_prompt, best_state)\n return solution if solution else best_state", + "type": "code", + "location": "/tree_of_thoughts/treeofthoughts.py:505-507" + }, + "131": { + "file_id": 10, + "content": "This code block checks if the generated solution from the model is valid. If it's not, it returns the best_state as the final result.", + "type": "comment" + } +} \ No newline at end of file diff --git a/docs/data/titles/0.json b/docs/data/titles/0.json new file mode 100644 index 0000000..7f1f6e4 --- /dev/null +++ b/docs/data/titles/0.json @@ -0,0 +1,65 @@ +{ + "/README.md": "Monte Carlo Tree of Thoughts: AI Collaboration Tool", + "/README.md:1-9": "Social Media Sharing Buttons", + "/README.md:12-46": "Boost Model Reasoning with ToT Algorithm", + "/README.md:141-162": "Collaborative Problem Solving Tree", + "/README.md:48-96": "Generating Thoughts with Code", + "/README.md:9-12": "Sharing Options for Tree-of-Thoughts Project", + "/README.md:97-140": "Monte Carlo Tree of Thoughts Algorithm", + "/example.py": "Monte Carlo Tree Search with OpenAI Model", + "/example.py:1-45": "OpenAI Thoughts Tree Solver", + "/example.py:46-53": "Object Initialization and Printing Solution", + "/prompts.txt": "Collaborative Tree-of-Thoughts Q&A", + "/prompts.txt:1-5": "Experts' Collaborative Thought Process", + "/prompts.txt:5-9": "Collaborative Tree of Thoughts Problem Solving", + "/prompts.txt:9-9": "Collaborative Problem-Solving Process", + "/pyproject.toml": "Configure Pyproject.toml for Python", + "/pyproject.toml:1-41": "Pyproject.toml: Project Configurations", + "/pyproject.toml:42-53": "Configuring Ruff and Black for Linting", + "/requirements.txt": "Importing NLP, AI, and Distributed Libraries", + "/tree_of_thoughts/README.md": "Enhanced Tree of Thoughts with Advanced Search Algos and Optimized Classes", + "/tree_of_thoughts/README.md:1-36": "Changelog: TreeofThoughts Refactoring", + "/tree_of_thoughts/README.md:109-127": "Tree of Thoughts Search Algorithm Implementation", + "/tree_of_thoughts/README.md:128-141": "Tree of Thoughts Algorithms: BFS, DFS, Best-First", + "/tree_of_thoughts/README.md:143-154": "Tree of Thoughts Search Algorithms", + "/tree_of_thoughts/README.md:156-169": "Optimized Tree of Thoughts: Monte Carlo and Solve Algorithms", + "/tree_of_thoughts/README.md:169-169": "Efficient Search Algorithm with Custom Parameters", + "/tree_of_thoughts/README.md:38-60": "TreeofThoughts: Tree-based Search Algorithm for Problem Solving", + "/tree_of_thoughts/README.md:61-77": "Algorithmic Search Methods", + "/tree_of_thoughts/README.md:78-94": "Pruning Threshold in Search Algorithms", + "/tree_of_thoughts/README.md:94-107": "Recursive Depth-First Search Algorithm", + "/tree_of_thoughts/__init__.py": "Initializing ThoughtTree Models", + "/tree_of_thoughts/base.py": "Abstract Base Class for Language Models", + "/tree_of_thoughts/huggingface_model.py": "HuggingFace Model Class and Function", + "/tree_of_thoughts/huggingface_model.py:1-31": "Generate Thoughts with Huggingface Model", + "/tree_of_thoughts/huggingface_model.py:33-58": "Model-based State Evaluation", + "/tree_of_thoughts/huggingface_model.py:59-64": "Error Handling and Resetting State Values", + "/tree_of_thoughts/openai_models.py": "OpenAI Chat API 
Generative Model", + "/tree_of_thoughts/openai_models.py:1-34": "OpenAI Language Model Class", + "/tree_of_thoughts/openai_models.py:110-122": "Error-Avoiding Generative Solution", + "/tree_of_thoughts/openai_models.py:123-146": "Evaluating States with Value Strategy", + "/tree_of_thoughts/openai_models.py:147-168": "OpenAI Model: Thought Evaluation", + "/tree_of_thoughts/openai_models.py:169-186": "OpenAI API for State Evaluation", + "/tree_of_thoughts/openai_models.py:35-64": "OpenAI Chat API Integration for ReAct Prompting", + "/tree_of_thoughts/openai_models.py:65-92": "Generate Thoughts using OpenAI Models", + "/tree_of_thoughts/openai_models.py:92-110": "Reinforcement Learning from Rejected Solutions", + "/tree_of_thoughts/treeofthoughts.py": "Multi-search Algorithms Tree of Thoughts", + "/tree_of_thoughts/treeofthoughts.py:1-40": "Tree of Thoughts Class and Methods", + "/tree_of_thoughts/treeofthoughts.py:102-122": "Pruning Threshold Adjustment", + "/tree_of_thoughts/treeofthoughts.py:124-139": "Filtering and Logging State Values", + "/tree_of_thoughts/treeofthoughts.py:141-172": "Error-Handled Solution Finder", + "/tree_of_thoughts/treeofthoughts.py:173-203": "Depth-First Thought Evaluation", + "/tree_of_thoughts/treeofthoughts.py:204-233": "Best-First Search Algorithm with Priority Queue", + "/tree_of_thoughts/treeofthoughts.py:234-265": "Tree of Thoughts: Thought Generation and Evaluation Algorithm", + "/tree_of_thoughts/treeofthoughts.py:266-296": "A* Algorithm for Optimized Thought Generation", + "/tree_of_thoughts/treeofthoughts.py:297-329": "Graph Search Algorithm: Priority Queue and Path Reconstruction", + "/tree_of_thoughts/treeofthoughts.py:330-356": "Optimizing Tree-Based State Scores", + "/tree_of_thoughts/treeofthoughts.py:357-384": "Evaluation-Guided Path Reconstruction", + "/tree_of_thoughts/treeofthoughts.py:385-416": "Adjustable Tree of Thoughts Algorithm", + "/tree_of_thoughts/treeofthoughts.py:41-79": "Tree of Thoughts BFS Algorithm", + "/tree_of_thoughts/treeofthoughts.py:417-452": "Monte Carlo Thought Generation", + "/tree_of_thoughts/treeofthoughts.py:453-478": "Transposition Table Update and UCB1 Calculation", + "/tree_of_thoughts/treeofthoughts.py:479-503": "Tree Model State Selection", + "/tree_of_thoughts/treeofthoughts.py:505-507": "Valid Solution Check", + "/tree_of_thoughts/treeofthoughts.py:80-100": "Concurrent Thought Evaluation" +} \ No newline at end of file diff --git a/docs/doc/159bfe49-598c-44d0-8f3a-93289173db20.json b/docs/doc/159bfe49-598c-44d0-8f3a-93289173db20.json new file mode 100644 index 0000000..187ba59 --- /dev/null +++ b/docs/doc/159bfe49-598c-44d0-8f3a-93289173db20.json @@ -0,0 +1,15 @@ +{ + "summary": "The code imports OpenAI language model, uses Monte Carlo Tree of Thoughts algorithm to solve a problem defined by the initial prompt, and prints the solution with specified parameters.", + "details": [ + { + "comment": "Code imports necessary packages and initializes an OpenAI language model, Monte Carlo Tree of Thoughts algorithm, sets initial prompt and parameters for generating thoughts. 
It then uses the Tree of Thoughts algorithm to solve a problem defined by the initial prompt.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/example.py\":0-44", + "content": "import os\nfrom tree_of_thoughts.openai_models import OpenAILanguageModel\nfrom tree_of_thoughts.treeofthoughts import MonteCarloTreeofThoughts\nfrom dotenv import load_dotenv\nload_dotenv()\napi_key = os.environ.get(\"OPENAI_API_KEY\")\n# Initialize the OpenAILanguageModel class with the API key\nmodel = OpenAILanguageModel(api_key=api_key)\n# Initialize the MonteCarloTreeofThoughts class with the model\ntree_of_thoughts = MonteCarloTreeofThoughts(model)\n# Define the initial prompt\ninitial_prompt = \"\"\"\nInput: 2 8 8 14\nPossible next steps:\n2 + 8 = 10 (left: 8 10 14)\n8 / 2 = 4 (left: 4 8 14)\n14 + 2 = 16 (left: 8 8 16)\n2 * 8 = 16 (left: 8 14 16)\n8 - 2 = 6 (left: 6 8 14)\n14 - 8 = 6 (left: 2 6 8)\n14 / 2 = 7 (left: 7 8 8)\n14 - 2 = 12 (left: 8 8 12)\nInput: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation\nPossible next steps:\n\"\"\"\n# Define the number of thoughts to generate\nnum_thoughts = 1\nmax_steps = 3\nmax_states = 4\npruning_threshold = 0.5\n# Generate the thoughts\nsolution = tree_of_thoughts.solve(\n initial_prompt=initial_prompt," + }, + { + "comment": "This code snippet is initializing an object with parameters, and then printing the solution. The variables represent the number of thoughts, maximum steps, maximum states, pruning threshold (likely for optimization), and possibly a sleep time (though commented out).", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/example.py\":45-52", + "content": " num_thoughts=num_thoughts,\n max_steps=max_steps,\n max_states=max_states,\n pruning_threshold=pruning_threshold,\n # sleep_time=sleep_time\n)\nprint(f\"Solution: {solution}\")" + } + ] +} \ No newline at end of file diff --git a/docs/doc/411149f0-4819-445f-ad5b-39c71b7b25cc.json b/docs/doc/411149f0-4819-445f-ad5b-39c71b7b25cc.json new file mode 100644 index 0000000..ec35712 --- /dev/null +++ b/docs/doc/411149f0-4819-445f-ad5b-39c71b7b25cc.json @@ -0,0 +1,45 @@ +{ + "summary": "This code initializes an OpenAI language model, generating text using the Chat API. It supports ReAct prompting, generates solutions, learns from errors, and evaluates states with an API.", + "details": [ + { + "comment": "This code is initializing a logging configuration and defining the OpenAILanguageModel class, which extends AbstractLanguageModel. The OpenAILanguageModel is an OpenAI language model with options for strategy, evaluation_strategy, api_base, api_model, enable_ReAct_prompting, and more. It allows for generating and evaluating thoughts using OpenAI API.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/openai_models.py\":0-33", + "content": "import logging\nfrom tree_of_thoughts.base import AbstractLanguageModel\nfrom swarms.models import OpenAIChat\n# Logging\nlogging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s - %(levelname)s - %(message)s\"\n)\nlogger = logging.getLogger(__name__)\nclass OpenAILanguageModel(AbstractLanguageModel):\n \"\"\"\n OpenAI Language Model\n Args:\n api_key (str): OpenAI API key\n strategy (str): Strategy for generating thoughts. Choose from 'cot' (Chain of Thoughts) or 'gpt' (GPT-3)\n evaluation_strategy (str): Strategy for evaluating thoughts. 
Choose from 'value' or 'vote'\n api_base (str): Base path for OpenAI API\n api_model (str): Model name for OpenAI API\n enable_ReAct_prompting (bool): Enable ReAct prompting\n Examples:\n >>> from tree_of_thoughts.models.openai_models import OpenAILanguageModel\n >>> model = OpenAILanguageModel(api_key=api_key)\n >>> model.generate_thoughts(state, k)\n >>> model.evaluate_states(states, initial_prompt)\n \"\"\"\n def __init__(" + }, + { + "comment": "Class initializes OpenAI Chat API for generating text. It allows ReAct prompting, which requires a specific format of observations and thoughts. Uses generate_text() function to produce text based on the given prompt.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/openai_models.py\":34-63", + "content": " self,\n api_key,\n strategy=\"cot\",\n evaluation_strategy=\"value\",\n enable_ReAct_prompting=True,\n *args,\n **kwargs,\n ):\n self.api_key = api_key\n self.use_chat_api = True\n self.enable_ReAct_prompting = enable_ReAct_prompting\n self.strategy = strategy\n self.evaluation_strategy = evaluation_strategy\n # reference : https://www.promptingguide.ai/techniques/react\n self.ReAct_prompt = \"\"\n if enable_ReAct_prompting:\n self.ReAct_prompt = (\n \"Write down your observations in format 'Observation:xxxx',\"\n \" then write down your thoughts in format 'Thoughts:xxxx'.\"\n )\n self.model = OpenAIChat(openai_api_key=api_key, *args, **kwargs)\n def generate_text(self, prompt: str, k: int = 3):\n \"\"\"Generate text from prompt using OpenAI API\"\"\"\n if self.use_chat_api:\n thoughts = []\n for _ in range(k):\n response = self.model(prompt)" + }, + { + "comment": "The code defines a class with two methods: `generate_thoughts` and `openai_models`. The `generate_thoughts` method takes a state, number of thoughts to generate, initial prompt, and list of rejected solutions as input. It converts the state into a string and passes it along with other parameters to the `openai_models` method which generates a list of thoughts using OpenAI API. The thoughts are then returned by the `generate_thoughts` method.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/openai_models.py\":64-91", + "content": " thoughts += [response]\n # print(f'thoughts: {thoughts}')\n return thoughts\n def generate_thoughts(\n self, state, k, initial_prompt, rejected_solutions=None\n ):\n \"\"\"\n Generate thoughts from state using OpenAI API\n Args:\n state (str or list): State of reasoning\n k (int): Number of thoughts to generate\n initial_prompt (str): Initial prompt\n rejected_solutions (list): List of rejected solutions\n Returns:\n list: List of thoughts\n \"\"\"\n if type(state) == str:\n state_text = state\n else:\n state_text = \"\\n\".join(state)\n print(\"New state generating thought:\", state, \"\\n\\n\")\n prompt = f\"\"\"You're an TreeofThoughts, an superintelligent AI model devoted to helping Humans by any means necessary. You're purpose is to generate a series of solutions to comply with the user's instructions, you must generate solutions on the basis of de" + }, + { + "comment": "This code is generating a prompt for an AI model to find the best solution, taking into account rejected solutions and learning from them. The function generates_solution is used to generate a solution given a prompt, state, and optionally a list of rejected solutions. It creates a formatted prompt including the initial prompt, the current state, and either all rejected solutions or none. 
Then it calls another function, generate_text, with this prompt and some variable k to generate thoughts based on this prompt. Finally, it returns these thoughts as the generated solution.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/openai_models.py\":91-109", + "content": "termining the most reliable solution in the shortest amount of time, while taking rejected solutions into account and learning from them. \n Considering the reasoning provided:\\n\\n\n ###'{state_text}'\\n\\n###\n Devise the best possible solution for the task: {initial_prompt}, Here are evaluated solutions that were rejected: \n ###{rejected_solutions}###, \n complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them.\"\"\"\n prompt += self.ReAct_prompt\n thoughts = self.generate_text(prompt, k)\n return thoughts\n def generate_solution(self, initial_prompt, state, rejected_solutions=None):\n try:\n if isinstance(state, list):\n state_text = \"\\n\".join(state)\n else:\n state_text = state\n prompt = f\"\"\"You're an TreeofThoughts, an superintelligent AI model devoted to " + }, + { + "comment": "This code aims to generate a solution for a task based on user instructions, learning from rejected solutions and avoiding the same mistakes. It generates one textual answer with simplicity and directness. If an exception occurs, it logs the error.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/openai_models.py\":109-121", + "content": "helping Humans by any means necessary. You're purpose is to generate a series of solutions to comply with the user's instructions, you must generate solutions on the basis of determining the most reliable solution in the shortest amount of time, while taking rejected solutions into account and learning from them. \n Considering the reasoning provided:\\n\\n\n ###'{state_text}'\\n\\n###\n Devise the best possible solution for the task: {initial_prompt}, Here are evaluated solutions that were rejected: \n ###{rejected_solutions}###, \n complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them.\"\"\"\n answer = self.generate_text(prompt, 1)\n print(f\"Answerrrrrr {answer}\")\n # print(thoughts)\n # print(f\"General Solution : {answer}\")\n return answer\n except Exception as e:\n logger.error(f\"Error in generate_solutions: {e}\")" + }, + { + "comment": "The code defines a function `evaluate_states` that takes in a list of states and an initial prompt. It iterates over each state, checks its type, and prints details about the state. 
If the evaluation strategy is set to \"value\", it calculates the value of each state by pessimistically valuing the context of past solutions and the latest generated solution as a float between 0 and 1.", "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/openai_models.py\":122-145", "content": " return None\n def evaluate_states(self, states, initial_prompt):\n if not states:\n return {}\n if self.evaluation_strategy == \"value\":\n state_values = {}\n for state in states:\n if type(state) == str:\n state_text = state\n else:\n state_text = \"\\n\".join(state)\n print(\n \"We receive a state of type\",\n type(state),\n \"For state: \",\n state,\n \"\\n\\n\",\n )\n prompt = f\"\"\" To achieve the following goal: '{initial_prompt}', pessimistically value the context of the past solutions and more importantly the latest generated solution you had AS A FLOAT BETWEEN 0 AND 1\\n\n Past solutions:\\n\\n\n {state_text}\\n \n If the solutions is not directly concretely making fast progress in achieving the goal, give it a lower score." + }, + { + "comment": "The code evaluates thought values using an OpenAI API call, converts the response to a float value between 0 and 1, and stores the results in a dictionary. It handles conversion errors by assigning a default value of 0 for invalid input. The evaluation strategy can be either \"value\" or \"vote\", with the latter prompting the model to vote for the best state among the given options.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/openai_models.py\":146-167", + "content": " Evaluate all solutions AS A FLOAT BETWEEN 0 and 1:\\n, DO NOT RETURN ANYTHING ELSE\n \"\"\"\n response = self.openai_api_call_handler(prompt, 10, 1)\n try:\n value_text = self.openai_choice2text_handler(\n response.choices[0]\n )\n # print(f'state: {value_text}')\n value = float(value_text)\n print(f\"Evaluated Thought Value: {value}\")\n except ValueError:\n value = 0 # Assign a default value if the conversion fails\n state_values[state] = value\n return state_values\n elif self.evaluation_strategy == \"vote\":\n states_text = \"\\n\".join([\" \".join(state) for state in states])\n prompt = (\n \"Given the following states of reasoning, vote for the best\"\n \" state utilizing an scalar value\"\n f\" 1-10:\\n{states_text}\\n\\nVote, on the probability of this\"" + }, + { + "comment": "Code makes an OpenAI API call to generate a response based on the provided prompt. It then selects the best state from the generated text, assigns a value of 1 if it matches the best state and 0 otherwise for each state in the input states. If an invalid evaluation strategy is provided, it raises a ValueError.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/openai_models.py\":168-185", + "content": " f\" state of reasoning achieveing {initial_prompt} and become\"\n \" very pessimistic very NOTHING ELSE\"\n )\n response = self.openai_api_call_handler(prompt, 50, 1)\n print(f\"state response: {response}\")\n best_state_text = self.openai_choice2text_handler(\n response.choices[0]\n )\n print(f\"Best state text: {best_state_text}\")\n best_state = tuple(best_state_text.split())\n print(f\"best_state: {best_state}\")\n return {state: 1 if state == best_state else 0 for state in states}\n else:\n raise ValueError(\n \"Invalid evaluation strategy.
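Stripped of the API plumbing, the "vote" branch reduces to a one-hot dictionary keyed on whichever state the model names; a sketch assuming the model's reply is the winning state's tokens:

```python
def one_hot_votes(states, best_state_text: str):
    # Every state gets 1 if it matches the voted-for state, else 0,
    # mirroring the dict comprehension in the branch above.
    best_state = tuple(best_state_text.split())
    return {state: 1 if state == best_state else 0 for state in states}

states = [("8", "10", "14"), ("4", "8", "14")]
print(one_hot_votes(states, "4 8 14"))  # second state gets the vote
```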
Choose 'value' or 'vote'.\"\n )" + } + ] +} \ No newline at end of file diff --git a/docs/doc/5026e750-f88b-47a9-bb0b-7390709f0ec9.json b/docs/doc/5026e750-f88b-47a9-bb0b-7390709f0ec9.json new file mode 100644 index 0000000..8447f62 --- /dev/null +++ b/docs/doc/5026e750-f88b-47a9-bb0b-7390709f0ec9.json @@ -0,0 +1,20 @@ +{ + "summary": "The code details a tree-of-thoughts approach for collaboratively answering a question through iterative refinement and markdown table formatted response.", + "details": [ + { + "comment": "The code describes a simulation of three brilliant experts collaboratively answering a question in a tree-of-thoughts approach, sharing their thought process step by step while considering and building upon each other's thoughts.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/prompts.txt\":0-4", + "content": "Imagine three different experts are answering this question. All experts will write down 1 step of their thinking, then share it with the group. Then all experts will go on to the next step, etc. If any expert realises they're wrong at any point then they leave. The question is...\nSimulate three brilliant, logical experts collaboratively answering a question. Each one verbosely explains their thought process in real-time, considering the prior explanations of others and openly acknowledging mistakes. At each step, whenever possible, each expert refines and builds upon the thoughts of others, acknowledging their contributions. They continue until there is a definitive answer to the question. For clarity, your entire response should be in a markdown table. The question is...\nImagine three highly intelligent experts working together to answer a question. They will follow a tree of thoughts approach, where each expert shares their thought process step by step. They will consider the input fr" + }, + { + "comment": "Code describes a collaborative problem-solving process where three experts employ the tree of thoughts method to answer a question. They iteratively refine and expand upon each other's ideas, admit errors, and credit others. The final response will be presented in a markdown table format.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/prompts.txt\":4-8", + "content": "om others, refine their thoughts, and build upon the group's collective knowledge. If an expert realizes their thought is incorrect, they will acknowledge it and withdraw from the discussion. Continue this process until a definitive answer is reached. Present the entire response in a markdown table. The question is...\nThree experts with exceptional logical thinking skills are collaboratively answering a question using a tree of thoughts method. Each expert will share their thought process in detail, taking into account the previous thoughts of others and admitting any errors. They will iteratively refine and expand upon each other's ideas, giving credit where it's due. The process continues until a conclusive answer is found. Organize the entire response in a markdown table format. The question is...\nEnvision a group of three experts working in unison to tackle a question by employing a tree of thoughts strategy. Each expert will thoroughly explain their line of thinking at every step, w" + }, + { + "comment": "Code snippet describes a collaborative problem-solving process where team members contribute ideas, recognize mistakes, and iterate until reaching a solution. 
The response should be formatted as a markdown table.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/prompts.txt\":8-8", + "content": "hile also considering the insights provided by their peers. They will openly recognize any mistakes and build upon the group's shared understanding. This iterative process will continue until a definitive solution is reached. Structure the entire response as a markdown table. The question is..." + } + ] +} \ No newline at end of file diff --git a/docs/doc/826fb6d5-abd3-42f2-8916-15474b39c9d9.json b/docs/doc/826fb6d5-abd3-42f2-8916-15474b39c9d9.json new file mode 100644 index 0000000..89e5db0 --- /dev/null +++ b/docs/doc/826fb6d5-abd3-42f2-8916-15474b39c9d9.json @@ -0,0 +1,10 @@ +{ + "summary": "This code imports various language models and thought tree modules from different packages and adds them to the `__all__` list for importing in this package. It includes OpenAI's LanguageModel, TreeofThoughts, MonteCarloTreeofThoughts, and several search algorithm implementations of the ThoughtTree.", + "details": [ + { + "comment": "This code imports various language models and thought tree modules from different packages and adds them to the `__all__` list for importing in this package. It includes OpenAI's LanguageModel, TreeofThoughts, MonteCarloTreeofThoughts, and several search algorithm implementations of the ThoughtTree.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/__init__.py\":0-26", + "content": "from tree_of_thoughts.base import AbstractLanguageModel\n#from tree_of_thoughts.huggingface_model import (\n #HuggingLanguageModel,\n#)\nfrom tree_of_thoughts.openai_models import (\n OpenAILanguageModel,\n)\nfrom tree_of_thoughts.treeofthoughts import (\n MonteCarloTreeofThoughts,\n TreeofThoughts,\n TreeofThoughtsASearch,\n TreeofThoughtsBEST,\n TreeofThoughtsBFS,\n TreeofThoughtsDFS,\n)\n__all__ = [\n \"OpenAILanguageModel\",\n \"TreeofThoughts\",\n \"MonteCarloTreeofThoughts\",\n \"TreeofThoughtsBFS\",\n \"TreeofThoughtsDFS\",\n \"TreeofThoughtsBEST\",\n \"TreeofThoughtsASearch\",\n \"AbstractLanguageModel\",\n \"HuggingLanguageModel\",\n]" + } + ] +} \ No newline at end of file diff --git a/docs/doc/89f16cc7-5025-4dd8-8c1e-ea78b8c13932.json b/docs/doc/89f16cc7-5025-4dd8-8c1e-ea78b8c13932.json new file mode 100644 index 0000000..4e03027 --- /dev/null +++ b/docs/doc/89f16cc7-5025-4dd8-8c1e-ea78b8c13932.json @@ -0,0 +1,55 @@ +{ + "summary": "The code updates the TreeofThoughts class's changelog, adds search parameters, and includes BFS, DFS, Monte Carlo, Best First Search, and A* Search classes. The code features two inherited classes: MonteCarloTreeofThoughts for Monte Carlo Tree Search optimization and OptimizedTreeofThoughts for enhanced Tree of Thoughts algorithm using optimized search parameters.", + "details": [ + { + "comment": "The code provides a comprehensive changelog for the TreeofThoughts class, updating variable names and modifying code structure for better readability. The solve method updated variable names while keeping the __init__ method unchanged. Various methods have been renamed and updated with more meaningful variable names.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/README.md\":0-35", + "content": "# Comprehensive Documentation and Changelog\nThis document provides a comprehensive overview of the changes made to the TreeofThoughts class and its methods to improve readability and understandability. 
The changes include updating variable names to be more meaningful and descriptive, as well as modifying the structure of the code for better readability.\n## Changelog\n1. TreeofThoughts Class\nUpdated the class definition to include a more descriptive docstring.\n2. __init__ Method\nNo changes were made to the __init__ method.\n3. solve Method\nUpdated variable names:\nx -> initial_prompt\nk -> num_thoughts\nT -> max_steps\nb -> max_states\nvth -> value_threshold\n4. tot_bfs Method\nUpdated variable names:\nx -> initial_prompt\nk -> num_thoughts\nT -> max_steps\nb -> max_states\nS0 -> current_states\nS0_t -> generated_states\nVt -> state_values\nSt -> selected_states\n5. tot_dfs Method\nUpdated variable names:\nx -> initial_prompt\nk -> num_thoughts\nT -> max_steps\nvth -> value_threshold\ns -> state\nt -> step\ns_prime -> next_state\nchild -> child_state" + }, + { + "comment": "Code snippet outlines the initialization, main entry point for solving problems (solve method), and optional parameters added for better control over the search process. No changes were made to save_tree_to_json or print_tree methods. The code introduces TreeofThoughts class which utilizes a tree-based search algorithm to solve problems using given model and search algorithm.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/README.md\":37-59", + "content": "### Added optional parameters for better control over the search process:\npruning_threshold\nconfidence_threshold\nmax_iterations\nconvergence_threshold\nconvergence_count\n6. save_tree_to_json Method\nNo changes were made to the save_tree_to_json method.\n7. print_tree Method\nNo changes were made to the print_tree method.\n# Documentation\nTreeofThoughts Class\nThe TreeofThoughts class is designed to solve problems using a tree-based search algorithm. It takes a model and a search algorithm (either 'BFS' or 'DFS') as input and provides methods to solve problems using the chosen algorithm.\n## Initialization\nThe __init__ method initializes the TreeofThoughts class with the given model and search algorithm. It also initializes an empty tree structure to store the search results.\n## Solve Method\nThe solve method is the main entry point for solving problems using the TreeofThoughts class. It takes the following parameters:\ninitial_prompt: The initial problem or prompt to be solved.\nnum_thoughts: The number of thoughts to generate at each step." + }, + { + "comment": "The code defines maximum search parameters and uses either BFS or DFS to solve the problem based on the chosen algorithm. 
The tot_bfs method performs a breadth-first search with given parameters, while the solve method calls the appropriate search method depending on the chosen algorithm.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/README.md\":60-76", + "content": "max_steps: The maximum number of steps to perform in the search.\nmax_states: The maximum number of states to consider at each step (for BFS).\nvalue_threshold: The threshold value for pruning states (for DFS).\ntimeout: The maximum time allowed for the search process.\nconfidence_threshold: The confidence threshold for stopping the search.\nmax_iterations: The maximum number of iterations allowed for the search.\nconvergence_threshold: The threshold for determining convergence.\nconvergence_count: The number of consecutive convergences required to stop the search.\nBased on the chosen search algorithm, the solve method calls either the tot_bfs or tot_dfs method to perform the search.\n## tot_bfs Method\nThe tot_bfs method performs a breadth-first search to solve the problem. It takes the following parameters:\ninitial_prompt: The initial problem or prompt to be solved.\nnum_thoughts: The number of thoughts to generate at each step.\nmax_steps: The maximum number of steps to perform in the search.\nmax_states: The maximum number of states to consider at each step." + }, + { + "comment": "The `pruning_threshold` is a value used to prune states during the search process. The method generates and evaluates states at each step, selecting the best states based on their values until either the maximum number of steps is reached or the best state is found.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/README.md\":77-93", + "content": "pruning_threshold: The threshold value for pruning states.\nThe method generates and evaluates states at each step, selecting the best states based on their values. The search continues until the maximum number of steps is reached, and the best state is returned.\n## tot_dfs Method\nThe tot_dfs method performs a depth-first search to solve the problem. It takes the following parameters:\ninitial_prompt: The initial problem or prompt to be solved.\nnum_thoughts: The number of thoughts to generate at each step.\nmax_steps: The maximum number of steps to perform in the search.\nvalue_threshold: The threshold value for pruning states.\npruning_threshold: The threshold value for pruning states based on their values.\nconfidence_threshold: The confidence threshold for stopping the search.\nmax_iterations: The maximum number of iterations allowed for the search.\nconvergence_threshold: The threshold for determining convergence.\nconvergence_count: The number of consecutive convergences required to stop the search.\nTh" + }, + { + "comment": "This code describes a recursive depth-first search algorithm for exploring a state space. It evaluates states and stops based on thresholds, maximum steps, or convergence criteria. The save_tree_to_json method saves the tree structure and metrics to a JSON file, while the print_tree method visualizes the tree structure.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/README.md\":93-106", + "content": "e method uses a recursive depth-first search approach to explore the state space. It generates and evaluates states at each step, and if a state's value is above the value_threshold and pruning_threshold, it continues the search with the new state. 
The search stops when the maximum number of steps is reached, the confidence threshold is met, or the convergence criteria are satisfied. The best state is then returned.\n## save_tree_to_json Method\nThe save_tree_to_json method saves the current tree structure and metrics to a JSON file. It takes the following parameter:\nfile_name: The name of the JSON file to save the tree structure and metrics.\nThis method is useful for logging the search process and analyzing the results later.\n## print_tree Method\nThe print_tree method prints the tree structure in a human-readable format. It takes the following parameters:\nnode: The current node in the tree.\ndepth: The depth of the current node in the tree (default is 0).\nThis method is useful for visualizing the tree structure and understanding the search process." + }, + { + "comment": "The code defines the \"TreeofThoughts\" class for implementing a Tree of Thoughts search algorithm, with methods like initialization, saving tree structure to JSON file, logging new state and adjusting pruning threshold percentile based on evaluated thoughts. The example provided demonstrates using the class with Monte Carlo, A* Search Algorithm, BFS, Best First Search.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/README.md\":108-126", + "content": "## Usage\nTo use the TreeofThoughts class, follow these steps:\nInitialize the class with a model and a search algorithm (either 'BFS' or 'DFS').\nCall the solve method with the required parameters to perform the search and obtain the best state.\n(Optional) Use the save_tree_to_json method to save the tree structure and metrics to a JSON file.\n(Optional) Use the print_tree method to visualize the tree structure.\nHere's an example of how to use the TreeofThoughts class:\n# V2 with Monte Carlo, A* Search Algorithm, BFS, Best First Search\n### Class: TreeofThoughts\nThis class represents the base class for the Tree of Thoughts search algorithm. It contains the following methods:\n- `__init__(self, model)`: Initializes the TreeofThoughts object with the given model.\n- `save_tree_to_json(self, file_name)`: Saves the tree to a JSON file with the given file name.\n- `logNewState(self, state, evaluation)`: Logs a new state and its evaluation to the tree.\n- `adjust_pruning_threshold_precentile(self, evaluated_thoughts, percentile)`: Adjusts the pruning threshold based on the percentile of evaluated thoughts." + }, + { + "comment": "The code defines three classes: TreeofThoughtsBFS, TreeofThoughtsDFS, and TreeofThoughtsBEST. Each class represents a different search algorithm variant of the Tree of Thoughts search algorithm. The classes inherit from the TreeofThoughts class and contain methods to solve problems using BFS, DFS, or Best-First Search with given parameters.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/README.md\":127-140", + "content": "- `adjust_pruning_threshold_moving_average(self, evaluated_thoughts, window_size)`: Adjusts the pruning threshold based on the moving average of evaluated thoughts.\n### Class: TreeofThoughtsBFS\nThis class represents the Breadth-First Search (BFS) variant of the Tree of Thoughts search algorithm. 
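The recursive flow of tot_dfs described above can be sketched as follows (hedged; the state flattening and model calls follow the conventions used elsewhere in this file):

```python
def dfs(model, state, step, initial_prompt, num_thoughts, max_steps,
        value_threshold, output):
    # Stop descending once the step budget is spent.
    if step > max_steps:
        return
    for thought in model.generate_thoughts(state, num_thoughts, initial_prompt):
        value = model.evaluate_states({thought: 0}, initial_prompt)[thought]
        if value >= value_threshold:  # prune weak branches
            child = (state, thought) if isinstance(state, str) else (*state, thought)
            output.append((child, value))
            dfs(model, child, step + 1, initial_prompt, num_thoughts,
                max_steps, value_threshold, output)
```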
It inherits from the TreeofThoughts class and contains the following method:\n- `solve(self, initial_prompt, num_thoughts, max_steps, max_states, value_threshold, pruning_threshold=0.5)`: Solves the problem using BFS with the given parameters.\n### Class: TreeofThoughtsDFS\nThis class represents the Depth-First Search (DFS) variant of the Tree of Thoughts search algorithm. It inherits from the TreeofThoughts class and contains the following method:\n- `solve(self, initial_prompt, num_thoughts, max_steps, value_threshold, pruning_threshold=0.5)`: Solves the problem using DFS with the given parameters.\n### Class: TreeofThoughtsBEST\nThis class represents the Best-First Search variant of the Tree of Thoughts search algorithm. It contains the following methods:" + }, + { + "comment": "This code snippet represents two classes, TreeofThoughtsBEST and TreeofThoughtsASearch, for solving problems using the Best-First Search and A* Search variants of the Tree of Thoughts algorithm. The classes have initialization methods (__init__), a method to save trees in JSON format (save_tree_to_json), a method to log new states with their evaluations (log_new_state), and a solve method that takes initial prompt, number of thoughts, maximum steps, and pruning threshold as parameters. The TreeofThoughtsASearch class also has an is_goal method for determining if a state is a goal state based on its score and a reconstruct_path method to reconstruct the path from the initial state to the current state using the came_from dictionary.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/README.md\":142-153", + "content": "- `__init__(self, model)`: Initializes the TreeofThoughtsBEST object with the given model.\n- `save_tree_to_json(self, file_name)`: Saves the tree to a JSON file with the given file name.\n- `log_new_state(self, state, evaluation)`: Logs a new state and its evaluation to the tree.\n- `solve(self, initial_prompt, num_thoughts, max_steps, pruning_threshold)`: Solves the problem using Best-First Search with the given parameters.\n### Class: TreeofThoughtsASearch\nThis class represents the A* Search variant of the Tree of Thoughts search algorithm. It contains the following methods:\n- `__init__(self, model)`: Initializes the TreeofThoughtsASearch object with the given model.\n- `solve(self, initial_prompt, num_thoughts=5, max_steps=30, pruning_threshold=0.4)`: Solves the problem using A* Search with the given parameters.\n- `is_goal(self, state, score)`: Determines if the given state is a goal state based on its score.\n- `reconstruct_path(self, came_from, current_state, initial_prompt)`: Reconstructs the path from the initial state to the current state using the came_from dictionary." + }, + { + "comment": "The code defines two classes, MonteCarloTreeofThoughts and OptimizedTreeofThoughts, both of which are derived from TreeofThoughts. MonteCarloTreeofThoughts uses Monte Carlo Tree Search to solve problems while optimizing search parameters based on the objective. On the other hand, OptimizedTreeofThoughts provides an optimized version of the Tree of Thoughts algorithm with a solve method.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/README.md\":155-168", + "content": "### Class: MonteCarloTreeofThoughts\nThis class represents the Monte Carlo Tree Search variant of the Tree of Thoughts search algorithm. 
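A hedged usage sketch for these two variants, using the signatures listed above (`model` and `initial_prompt` are assumed to be defined as in the README examples):

```python
from tree_of_thoughts.treeofthoughts import (
    TreeofThoughtsASearch,
    TreeofThoughtsBEST,
)

best_first = TreeofThoughtsBEST(model)
answer = best_first.solve(initial_prompt, num_thoughts=5, max_steps=30,
                          pruning_threshold=0.5)

a_star = TreeofThoughtsASearch(model)
answer = a_star.solve(initial_prompt)  # defaults: 5 thoughts, 30 steps, 0.4 threshold
```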
It inherits from the TreeofThoughts class and contains the following methods:\n- `__init__(self, model, objective=\"balance\")`: Initializes the MonteCarloTreeofThoughts object with the given model and objective.\n- `optimize_params(self, num_thoughts, max_steps, max_states)`: Optimizes the search parameters based on the objective.\n- `solve(self, initial_prompt, num_thoughts, max_steps, max_states, pruning_threshold)`: Solves the problem using\n Monte Carlo Tree Search with the given parameters.\n- `monte_carlo_search(self, initial_prompt, num_thoughts, max_steps, max_states, pruning_threshold)`: Performs the Monte Carlo Tree Search with the given parameters.\n### Class: OptimizedTreeofThoughts\nThis class represents an optimized version of the Tree of Thoughts search algorithm. It inherits from the TreeofThoughts class and contains the following method:\n- `solve(self," + }, + { + "comment": "Solves problem with optimized search algorithm using given parameters (x, k, T, b, vth, timeout, confidence_threshold, max_iterations, convergence_threshold, convergence_count).", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/README.md\":168-168", + "content": " x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None)`: Solves the problem using an optimized search algorithm with the given parameters." + } + ] +} \ No newline at end of file diff --git a/docs/doc/b5af674f-24fc-4bfb-8139-2ab97ee3dc53.json b/docs/doc/b5af674f-24fc-4bfb-8139-2ab97ee3dc53.json new file mode 100644 index 0000000..2737253 --- /dev/null +++ b/docs/doc/b5af674f-24fc-4bfb-8139-2ab97ee3dc53.json @@ -0,0 +1,10 @@ +{ + "summary": "The code snippet is importing four libraries - transformers, openai, langchain, and swarms. These libraries are commonly used for natural language processing (transformers), text generation (openai), working with language models (langchain), and distributed computing tasks (swarms).", + "details": [ + { + "comment": "The code snippet is importing four libraries - transformers, openai, langchain, and swarms. These libraries are commonly used for natural language processing (transformers), text generation (openai), working with language models (langchain), and distributed computing tasks (swarms).", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/requirements.txt\":0-3", + "content": "transformers\nopenai\nlangchain\nswarms" + } + ] +} \ No newline at end of file diff --git a/docs/doc/b6df76a2-47c1-4b5a-aea5-72f769099465.json b/docs/doc/b6df76a2-47c1-4b5a-aea5-72f769099465.json new file mode 100644 index 0000000..20233c0 --- /dev/null +++ b/docs/doc/b6df76a2-47c1-4b5a-aea5-72f769099465.json @@ -0,0 +1,35 @@ +{ + "summary": "This code offers social media sharing tools for promoting the \"tree-of-thoughts\" project, which combines the Tree of Thoughts algorithm, OpenAI Language Model API, and user model connections to improve AI reasoning. It initializes a Monte Carlo Tree of Thoughts model, sets up an initial prompt, solves problems using the algorithm with specific parameters, and prints collaborative solutions.", + "details": [ + { + "comment": "This code is a README file that includes various sharing buttons for social media platforms like Discord, Twitter, LinkedIn, Facebook, and Reddit. 
These buttons allow users to easily share the project link with their networks.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/README.md\":0-8", + "content": "[![Multi-Modality](agorabanner.png)](https://discord.gg/qUtxnK2NMf)\n![Tree of Thoughts Banner](treeofthoughts.png)\n![Discord](https://img.shields.io/discord/999382051935506503)\n[![Twitter](https://img.shields.io/twitter/url?style=social&url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts)](https://twitter.com/intent/tweet?text=Check%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts!%20https://github.com/kyegomez/tree-of-thoughts)\n[![LinkedIn](https://img.shields.io/badge/Share-LinkedIn-blue?style=social&logo=linkedin)](https://www.linkedin.com/sharing/share-offsite/?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts)\n[![Facebook](https://img.shields.io/badge/Share-Facebook-blue?style=social&logo=facebook)](https://www.facebook.com/sharer/sharer.php?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts)\n[![Reddit](https://img.shields.io/badge/Share-Reddit-orange?style=social&logo=reddit)](https://www.reddit.com/submit?url=https%3A%" + }, + { + "comment": "Code snippet provides sharing options for the \"tree-of-thoughts\" project on various social media platforms like Hacker News, Pinterest, and WhatsApp using badges. The links are prefilled with a message to promote the project's purpose: improving AI reasoning through the Tree of Thoughts.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/README.md\":8-11", + "content": "2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts&title=Check%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts%21)\n[![Hacker News](https://img.shields.io/badge/Share-Hacker%20News-orange?style=social&logo=y-combinator)](https://news.ycombinator.com/submitlink?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts&t=Check%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts%21)\n[![Pinterest](https://img.shields.io/badge/Share-Pinterest-red?style=social&logo=pinterest)](https://pinterest.com/pin/create/button/?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts&media=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts%2Fraw%2Fmain%2Ftree-of-thoughts.jpeg&description=Check%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts%21)\n[![WhatsApp](https://img.shields.io/badge/Share-WhatsApp-green?style=social&logo=whatsapp)](https://api.whatsapp.com/send?text=Check" + }, + { + "comment": "This code installs the Tree of Thoughts algorithm, which significantly improves model reasoning by up to 70%. Users can connect their own models and experience superintelligence. The code imports necessary classes, initializes an OpenAILanguageModel with an API key, and initializes a MonteCarloTreeofThoughts class with the model for improved reasoning.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/README.md\":11-45", + "content": "%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts%21%20https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts)\n[Paper link](https://arxiv.org/pdf/2305.10601.pdf)\n[Author's implementation](https://github.com/princeton-nlp/tree-of-thought-llm)\n## Introduction\nTree of Thoughts (ToT) is a powerful and flexible algorithm that significantly advances model reasoning by up to 70%. 
This plug-and-play version allows you to connect your own models and experience superintelligence!\n## Install\n```bash\npip install tree-of-thoughts\n```\n## Usage\n```python\nimport os\nfrom tree_of_thoughts.openai_models import OpenAILanguageModel\nfrom tree_of_thoughts.treeofthoughts import MonteCarloTreeofThoughts\nfrom dotenv import load_dotenv\nload_dotenv()\napi_key = os.environ.get(\"OPENAI_API_KEY\")\n# Initialize the OpenAILanguageModel class with the API key\nmodel = OpenAILanguageModel(api_key=api_key)\n# Initialize the MonteCarloTreeofThoughts class with the model\ntree_of_thoughts = MonteCarloTreeofThoughts(model)" + }, + { + "comment": "The code is initializing the initial prompt, defining parameters for generating thoughts (e.g., number of thoughts, maximum steps and states), and setting a pruning threshold for tree search. It then calls the solve function from tree_of_thoughts module to generate thoughts using the Hugging Face Transformers language model with the given parameters. The solution is printed at the end.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/README.md\":47-95", + "content": "# Define the initial prompt\ninitial_prompt = \"\"\"\nInput: 2 8 8 14\nPossible next steps:\n2 + 8 = 10 (left: 8 10 14)\n8 / 2 = 4 (left: 4 8 14)\n14 + 2 = 16 (left: 8 8 16)\n2 * 8 = 16 (left: 8 14 16)\n8 - 2 = 6 (left: 6 8 14)\n14 - 8 = 6 (left: 2 6 8)\n14 / 2 = 7 (left: 7 8 8)\n14 - 2 = 12 (left: 8 8 12)\nInput: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation\nPossible next steps:\n\"\"\"\n# Define the number of thoughts to generate\nnum_thoughts = 1\nmax_steps = 3\nmax_states = 4\npruning_threshold = 0.5\n# Generate the thoughts\nsolution = tree_of_thoughts.solve(\n initial_prompt=initial_prompt,\n num_thoughts=num_thoughts,\n max_steps=max_steps,\n max_states=max_states,\n pruning_threshold=pruning_threshold,\n # sleep_time=sleep_time\n)\nprint(f\"Solution: {solution}\")\n```\n### ToT with HF LLM\nTo run Hugging Face Transformers with Tree of Thoughts:\n```python\nfrom tree_of_thoughts import TreeofThoughts, HuggingLanguageModel, MonteCarloTreeofThoughts\nmodel_name=\"01-ai/Yi-34B\"\nmodel = HuggingLanguageModel(model_name, " + }, + { + "comment": "The code initializes a MonteCarloTreeofThoughts model, sets up an initial prompt, and then solves the problem using the Tree of Thoughts algorithm with specified parameters.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/README.md\":96-139", + "content": " model_tokenizer=model_name, \n verbose=True)\n# Initialize the MonteCarloTreeofThoughts class with the model\ntree_of_thoughts = MonteCarloTreeofThoughts(model)\n# Note to reproduce the same results from the tree of thoughts paper if not better, \n# craft an 1 shot chain of thought prompt for your task below\ninitial_prompt = \"\"\"\nInput: 2 8 8 14\nPossible next steps:\n2 + 8 = 10 (left: 8 10 14)\n8 / 2 = 4 (left: 4 8 14)\n14 + 2 = 16 (left: 8 8 16)\n2 * 8 = 16 (left: 8 14 16)\n8 - 2 = 6 (left: 6 8 14)\n14 - 8 = 6 (left: 2 6 8)\n14 / 2 = 7 (left: 7 8 8)\n14 - 2 = 12 (left: 8 8 12)\nInput: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation\nPossible next steps:\n\"\"\"\nnum_thoughts = 1\nmax_steps = 3\nmax_states = 4\npruning_threshold = 0.5\nsolution = tree_of_thoughts.solve(\n initial_prompt=initial_prompt,\n num_thoughts=num_thoughts, \n max_steps=max_steps, \n max_states=max_states, \n pruning_threshold=pruning_threshold,\n # sleep_time=sleep_time" + }, + { + "comment": 
"Code snippet for printing the solution obtained after solving a problem collaboratively using the tree of thoughts method.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/README.md\":140-161", + "content": ")\nprint(f\"Solution: {solution}\")\n```\n### Basic Prompts\n- Copy and paste this into your llm!\n```\n\"Three experts with exceptional logical thinking skills are collaboratively answering a question using the tree of thoughts method. Each expert will share their thought process in detail, taking into account the previous thoughts of others and admitting any errors. They will iteratively refine and expand upon each other's ideas, giving credit where it's due. The process continues until a conclusive answer is found. Organize the entire response in a markdown table format. The task is:\n```\n# Acknowledgements\nThanks to: Shunyu Yao Princeton University, Dian Yu Google DeepMind, Jeffrey Zhao, Google DeepMind, Izhak Shafran Google DeepMind, Thomas L. Griffiths, Princeton University, Yuan Cao Google DeepMind, Karthik Narasimha, Princeton University for sharing this amazing work with the world!\nAnd, thanks to Phil Wang or Lucidrains for inspiring me to devote myself to open source AI Research\n# License\nApache" + } + ] +} \ No newline at end of file diff --git a/docs/doc/cfedd51c-0490-44e8-bfbd-7b482d49fbc8.json b/docs/doc/cfedd51c-0490-44e8-bfbd-7b482d49fbc8.json new file mode 100644 index 0000000..0fd8cb7 --- /dev/null +++ b/docs/doc/cfedd51c-0490-44e8-bfbd-7b482d49fbc8.json @@ -0,0 +1,10 @@ +{ + "summary": "This code defines an abstract base class `AbstractLanguageModel` for language models with two required methods: `generate_thoughts` and `evaluate_states`. It uses the ABC (Abstract Base Classes) module from Python's `abc` library.", + "details": [ + { + "comment": "This code defines an abstract base class `AbstractLanguageModel` for language models with two required methods: `generate_thoughts` and `evaluate_states`. It uses the ABC (Abstract Base Classes) module from Python's `abc` library.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/base.py\":0-10", + "content": "from abc import ABC, abstractmethod\nclass AbstractLanguageModel(ABC):\n @abstractmethod\n def generate_thoughts(self, state, k):\n pass\n @abstractmethod\n def evaluate_states(self, states):\n pass" + } + ] +} \ No newline at end of file diff --git a/docs/doc/d3770be6-a567-4799-81f8-c847a2eea9b6.json b/docs/doc/d3770be6-a567-4799-81f8-c847a2eea9b6.json new file mode 100644 index 0000000..4cad2c7 --- /dev/null +++ b/docs/doc/d3770be6-a567-4799-81f8-c847a2eea9b6.json @@ -0,0 +1,15 @@ +{ + "summary": "The given code configures the \"pyproject.toml\" file to define project details, specify dependencies, and set up linting and formatting for Python code using Ruff and Black. It enables aggressive mode, ignores certain files, and previews changes before applying them.", + "details": [ + { + "comment": "This code defines a project's details in the pyproject.toml file, including name, version, author, license, keywords, and classifiers. 
It also specifies dependencies and development dependencies for the project.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/pyproject.toml\":0-40", + "content": "[tool.poetry]\nname = \"tree-of-thoughts\"\nversion = \"0.3.9\"\ndescription = \"Tree of Thoughts - Pytorch\"\nauthors = [\"Kye Gomez \"]\nlicense = \"MIT\"\nreadme = \"README.md\" # Assuming you have a README.md file\nhomepage = \"https://github.com/kyegomez/tree-of-thoughts\"\nkeywords = [\"artificial intelligence\", \"deep learning\", \"optimizers\", \"Prompt Engineering\"]\nclassifiers = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.6\",\n]\n[tool.poetry.dependencies]\npython = \"^3.6\"\ntransformers = \"*\"\nswarms = \"*\"\n[tool.poetry.dev-dependencies]\n[build-system]\nrequires = [\"poetry-core>=1.0.0\"]\nbuild-backend = \"poetry.core.masonry.api\"\n[tool.poetry.group.lint.dependencies]\nruff = \"^0.0.249\"\ntypes-toml = \"^0.10.8.1\"\ntypes-redis = \"^4.3.21.6\"\ntypes-pytz = \"^2023.3.0.0\"\nblack = \"^23.1.0\"\ntypes-chardet = \"^5.0.4.6\"\nmypy-protobuf = \"^3.0.0\"\n[tool.autopep8]\nmax_line_length = 80" + }, + { + "comment": "The code configures the \"pyproject.toml\" file for linting and formatting Python code using Ruff and Black. It sets an ignore list, enables in-place correction, recursive scanning, and aggressive mode with a threshold of 3. The line length is set to 80, target version is py38, and previews changes before applying them.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/pyproject.toml\":41-52", + "content": "ignore = \"E501,W6\" # or [\"E501\", \"W6\"]\nin-place = true\nrecursive = true\naggressive = 3\n[tool.ruff]\nline-length = 80\n[tool.black]\nline-length = 80\ntarget-version = ['py38']\npreview = true" + } + ] +} \ No newline at end of file diff --git a/docs/doc/f3dfe944-4e43-4481-8c23-ad7a1c2576d4.json b/docs/doc/f3dfe944-4e43-4481-8c23-ad7a1c2576d4.json new file mode 100644 index 0000000..fddd5e8 --- /dev/null +++ b/docs/doc/f3dfe944-4e43-4481-8c23-ad7a1c2576d4.json @@ -0,0 +1,20 @@ +{ + "summary": "The `HuggingLanguageModel` class generates solutions using the HuggingfaceLLM model, while the function evaluates states and handles errors by printing them and resetting state values to 0.", + "details": [ + { + "comment": "This code defines a class `HuggingLanguageModel` that uses a HuggingfaceLLM model for generating coherent solutions based on a given state. The `generate_thoughts` method takes a state, number of thoughts to generate (k), and optional maximum length of generated thoughts, and returns the generated thoughts as a list. 
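A hedged usage sketch for the wrapper described above; the model name mirrors the README example and the state is an arbitrary list of reasoning steps:

```python
from tree_of_thoughts.huggingface_model import HuggingLanguageModel

lm = HuggingLanguageModel("01-ai/Yi-34B", model_tokenizer="01-ai/Yi-34B",
                          verbose=True)
# `state` is a sequence of reasoning steps; k thoughts are requested.
thoughts = lm.generate_thoughts(["Input: 2 8 8 14"], k=3)
```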
If an error occurs while generating thoughts, it prints an error message and returns an empty list.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/huggingface_model.py\":0-30", + "content": "from swarms.models import HuggingfaceLLM\nclass HuggingLanguageModel:\n def __init__(\n self, model_name, model_tokenizer=None, verbose=False, *args, **kwargs\n ):\n self.model = HuggingfaceLLM(model_name, *args, **kwargs)\n self.verbose = verbose\n def generate_thoughts(self, state, k, max_length=100):\n state_text = \" \".join(state)\n prompt = (\n \"Write down your observations in format 'Observation:xxxx', then\"\n \" write down your thoughts in format 'Thoughts:xxxx Given the\"\n f\" current state of reasoning: '{state_text}', generate\"\n f\" {k} coherent solutions to achieve {state_text}\"\n )\n if self.verbose:\n print(f\"Generating thoughts for state: {state_text}\")\n try:\n self.model.run(prompt)\n except Exception as e:\n if self.verbose:\n print(f\"Error generating thoughts for state: {state_text}\")\n print(f\"Error: {e}\")\n thoughts = []\n return thoughts" + }, + { + "comment": "This function evaluates states and their potential to achieve a given prompt using the model. It joins state elements into a string, constructs a prompt for the model, tries to predict value as float between 0 and 1, handles ValueError if conversion fails, assigning default value, and handles other exceptions.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/huggingface_model.py\":32-57", + "content": " def evaluate_states(self, states, initial_prompt, max_length=10):\n state_values = {}\n for state in states:\n state_text = \" \".join(state)\n prompt = (\n f\"Given the current state of reasoning: '{state_text}',\"\n \" pessimitically evaluate its value as a float between 0 and 1\"\n f\" based on it's potential to achieve {initial_prompt}\"\n )\n if self.verbose:\n print(f\"Evaluating state: {state_text}\")\n try:\n value_text = self.model(prompt)\n value = float(value_text)\n except ValueError:\n if self.verbose:\n print(\n \"Error converting value to float for state:\"\n f\" {state_text}\"\n )\n value = 0 # Assign a default value if the conversion fails\n except Exception as e:\n if self.verbose:\n print(f\"Error evaluating state: {state_text}\")" + }, + { + "comment": "Catching and printing an error, then setting state values to 0 if an error occurs.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/huggingface_model.py\":58-63", + "content": " print(f\"Error: {e}\")\n value = 0\n state_values[state] = value\n return state_values" + } + ] +} \ No newline at end of file diff --git a/docs/doc/f9ac539f-e191-49ed-a2e7-76d28a05ed9b.json b/docs/doc/f9ac539f-e191-49ed-a2e7-76d28a05ed9b.json new file mode 100644 index 0000000..e699d50 --- /dev/null +++ b/docs/doc/f9ac539f-e191-49ed-a2e7-76d28a05ed9b.json @@ -0,0 +1,95 @@ +{ + "summary": "The code defines a `TreeofThoughts` class with JSON saving, logging, and BFS/DFS-based subclass for solving problems concurrently. It uses A* search, PriorityQueue, Monte Carlo tree search, transposition table, pruning, UCB1, and checks solution validity.", + "details": [ + { + "comment": "The code defines a class called TreeofThoughts with an init method that initializes object attributes, including a dictionary for the tree structure. It also includes methods to save the tree to JSON and log new states along with their associated thoughts or evaluations. 
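The node-logging scheme just described, sketched in isolation (a tuple state is flattened into a single ' | '-joined key; a simplified sketch, not the verbatim method):

```python
def log_new_state(tree, state, evaluation):
    # Tuple states become one "a | b | c" key; plain strings pass through.
    key = state if isinstance(state, str) else " | ".join(state)
    tree["nodes"].setdefault(key, {"thoughts": []})["thoughts"].append(evaluation)
```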
The history attribute is added in the init method as an empty list.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":0-39", + "content": "import concurrent.futures\nimport json\nimport logging\nimport os\nimport time\nfrom queue import PriorityQueue\nfrom typing import Any, Dict, Union\nimport numpy as np\nDATA_PATH = \"./data\"\nlogging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s - %(levelname)s - %(message)s\"\n)\nlogger = logging.getLogger(__name__)\nclass TreeofThoughts:\n def __init__(self, model):\n self.model = model\n self.tree: Dict[str, Dict[str, Union[float, Dict[str, Any]]]] = {\n \"nodes\": {},\n }\n self.best_state = None\n self.best_value = float(\"-inf\")\n self.history = [] # added line initalize history\n def save_tree_to_json(self, file_name):\n os.makedirs(os.path.dirname(file_name), exist_ok=True)\n with open(file_name, \"w\") as json_file:\n json.dump(self.tree, json_file, indent=4)\n def logNewState(self, state, evaluation):\n if not (type(state) == str):\n state = \" | \".join(state)\n if state in self.tree[\"nodes\"]:\n self.tree[\"nodes\"][state][\"thoughts\"].append(evaluation)" + }, + { + "comment": "This code defines a class `TreeofThoughtsBFS` that inherits from `TreeofThoughts`. It has methods for adjusting pruning thresholds based on percentile and moving average. The `solve()` method takes an initial prompt, number of thoughts, max steps, max states, value threshold, and pruning threshold. It uses BFS to explore the tree of thoughts.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":40-78", + "content": " else:\n self.tree[\"nodes\"][state] = {\"thoughts\": [evaluation]}\n def adjust_pruning_threshold_precentile(\n self, evaluated_thoughts, percentile\n ):\n values = np.array(list(evaluated_thoughts.values()))\n if values.size == 0:\n return 0\n return max(np.percentile(values, percentile), 0.1)\n def adjust_pruning_threshold_moving_average(\n self, evaluated_thoughts, window_size\n ):\n values = list(evaluated_thoughts.values())\n if len(values) < window_size:\n return np.mean(values) if values else 0\n else:\n return max(np.mean(values[-window_size:]), 0.1)\n######################\nclass TreeofThoughtsBFS(TreeofThoughts):\n def solve(\n self,\n initial_prompt,\n num_thoughts,\n max_steps,\n max_states,\n value_threshold,\n pruning_threshold=0.5,\n ):\n current_states = [initial_prompt]\n state_values = {}\n dynamic_pruning_threshold = pruning_threshold\n try:" + }, + { + "comment": "This code uses a ThreadPoolExecutor to concurrently evaluate multiple thoughts generated from states. 
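The fan-out/fan-in pattern described here, as a simplified sketch (assumes the same evaluate_states interface; non-numeric results are dropped, mirroring the check noted above):

```python
import concurrent.futures

def score_thoughts(model, thoughts, initial_prompt):
    # Score each thought on a worker thread, then collect numeric results.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = {
            thought: executor.submit(model.evaluate_states, {thought: 0}, initial_prompt)
            for thought in thoughts
        }
        concurrent.futures.wait(futures.values())
        return {t: f.result() for t, f in futures.items()
                if isinstance(f.result(), (int, float))}
```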
It selects thoughts from the current_states, submits them for evaluation, waits for the results, and filters out non-numeric results before storing the evaluated thoughts.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":79-99", + "content": " with concurrent.futures.ThreadPoolExecutor() as executor:\n for step in range(1, max_steps + 1):\n selected_states = []\n for state in current_states:\n thoughts = self.model.generate_thoughts(\n state, num_thoughts, initial_prompt\n )\n futures = [\n executor.submit(\n self.model.evaluate_states,\n {thought: 0},\n initial_prompt,\n )\n for thought in thoughts\n ]\n concurrent.futures.wait(futures)\n evaluated_thoughts = {\n thought: fut.result()\n for thought, fut in zip(thoughts, futures)\n if isinstance(fut.result(), (int, float))\n } # check if result is a number" + }, + { + "comment": "This code checks if there are evaluated thoughts and adjusts the pruning threshold based on them. It then loops through the evaluated thoughts, flattens the state with the thought (if the state is a string), and appends it to selected_states along with the value. The selected states are sorted by value in descending order, and only the top max_states are kept.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":101-121", + "content": " if (\n evaluated_thoughts\n ): # only adjust if you have evaluated thoughts\n dynamic_pruning_threshold = (\n self.adjust_pruning_threshold_moving_average(\n evaluated_thoughts, 5\n )\n )\n for thought, value in evaluated_thoughts.items():\n flattened_state = (\n (state, thought)\n if isinstance(state, str)\n else (*state, thought)\n )\n selected_states.append((flattened_state, value))\n selected_states.sort(key=lambda x: x[1], reverse=True)\n selected_states = selected_states[\n :max_states\n ] # Select only the top states" + }, + { + "comment": "This code block iterates through selected_states and assigns values to state_values if the value is above a certain threshold. It logs the state_values, and if there are values in state_values, it finds the highest rated solution, generates a solution using the model, and logs any errors that occur during this process.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":123-138", + "content": " for state, value in selected_states:\n if value >= dynamic_pruning_threshold:\n state_values[state] = value\n self.logNewState(state, value)\n logger.debug(f\"State Values: {state_values}\")\n # if state_values:\n # highest_rated_solution = max(state_values.items(), key=lambda x: x[1])\n # print(f\"highest rated solution: {highest_rated_solution}\")\n # highest_rated_state = highest_rated_solution[0] # Use a different name to avoid confusion\n # print(f'highest rated state: {highest_rated_state}')\n # try:\n # solution = self.model.generate_solution(initial_prompt, highest_rated_state)\n # except Exception as e:\n # logger.error(f\"Error in generating solution: {e}\")\n # solution = None # Set a fallback value for solution" + }, + { + "comment": "The code checks if there is a solution and returns it, otherwise, it finds the highest rated state using DFS, generates a solution from that state, and returns it. 
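The moving-average threshold rule mentioned above, sketched (slightly simplified: the 0.1 floor is applied unconditionally here):

```python
import numpy as np

def moving_average_threshold(values, window_size=5):
    # Average the most recent scores; never let the threshold drop below 0.1.
    if not values:
        return 0.0
    return max(float(np.mean(values[-window_size:])), 0.1)
```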
If there is an error during execution, it logs the error and returns None.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":140-171", + "content": " # return solution if solution is not None else highest_rated_state # Return highest rated state if solution is None\n if state_values:\n highest_rated_solution = max(\n state_values.items(), key=lambda x: x[1]\n )\n highest_rated_state = highest_rated_solution[0]\n solution = self.model.generate_solution(\n initial_prompt, highest_rated_state\n )\n print(\n \"Highest_rated solution:\"\n f\" {highest_rated_solution} highest_rated_solution:\"\n f\" {highest_rated_solution} Solution: {solution}\"\n )\n return solution if solution else highest_rated_state\n else:\n return None\n except Exception as e:\n logger.error(f\"Error in tot_bfs: {e}\")\n return None\n###########\nclass TreeofThoughtsDFS(TreeofThoughts):\n def solve(\n self,\n initial_prompt," + }, + { + "comment": "The code defines a function that performs Depth-First Search (DFS) to explore the thought space and evaluate potential thoughts. It takes in parameters for maximum steps, pruning threshold, and others. The function generates initial thoughts, evaluates them, filters out lower-scoring ones, and continues DFS on the remaining thoughts until max_steps is reached or all thoughts are exhausted.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":172-202", + "content": " num_thoughts,\n max_steps,\n value_threshold,\n pruning_threshold=0.5,\n ):\n output = []\n def dfs(state, step):\n nonlocal output\n if step > max_steps:\n thought = self.model.generate_thoughts(state, 1, initial_prompt)\n value = self.model.evaluate_states({state}, initial_prompt)[\n state\n ]\n output.append((thought, value))\n return\n thoughts = self.model.generate_thoughts(\n state, self.num_thoughts, initial_prompt\n )\n evaluated_thoughts = self.model.evaluate_states(\n {thought: 0 for thought in thoughts}, initial_prompt\n )\n filtered_thoughts = [\n thought\n for thought in thoughts\n if evaluated_thoughts[thought] >= self.pruning_threshold\n ]\n for next_state in filtered_thoughts:\n state_value = self.model.evaluate_states(" + }, + { + "comment": "This code is implementing a best-first search algorithm using a priority queue. It explores the state space of the quality of states, saves the tree to JSON format, and handles exceptions in case any occur during execution. 
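One way to realize the best-first loop described here (a hedged sketch; the score is negated because PriorityQueue is a min-heap, so the highest-valued state pops first):

```python
from queue import PriorityQueue

def best_first_skeleton(initial_prompt, max_steps):
    frontier = PriorityQueue()
    frontier.put((0.0, initial_prompt))
    visited = set()
    for _ in range(max_steps):
        if frontier.empty():
            break
        _, state = frontier.get()
        if state in visited:
            continue
        visited.add(state)
        # ...generate and score children here, then push (-value, child)...
    return visited
```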
The goal is to find the best solution for a given model and initial prompt.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":203-232", + "content": " {next_state: 0}, initial_prompt\n )[next_state]\n if state_value > self.value_threshold:\n child = (\n (state, next_state)\n if isinstance(state, str)\n else (*state, next_state)\n )\n dfs(child, step + 1)\n try:\n dfs(initial_prompt, 1)\n best_state, _ = max(output, key=lambda x: x[1])\n solution = self.model.generate_solution(initial_prompt, best_state)\n return solution if solution else best_state\n except Exception as e:\n logger.error(f\"Error in tot_dfs: {e}\")\n return None\n# v2 => best first search => explores state space of the quality of the states\n# priority que or greedy BFS\nclass TreeofThoughtsBEST:\n def __init__(self, model):\n self.model = model\n self.tree = {\"nodes\": {}}\n def save_tree_to_json(self, file_name):\n os.makdirs(os.path.dirname(file_name), exist_ok=True)" + }, + { + "comment": "This code initializes a tree of thoughts, logs new states with their evaluations, solves the tree by generating and evaluating thoughts for each state up to a certain number, and stores the tree in a JSON file. It utilizes PriorityQueue, set, and functions for thought generation and evaluation.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":233-264", + "content": " with open(file_name, \"w\") as json_file:\n json.dump(self.tree, json_file, indent=4)\n def log_new_state(self, state, evaluation):\n state_key = \" | \".join(state) if isinstance(state, tuple) else state\n if state_key in self.tree[\"nodes\"]:\n self.tree[\"nodes\"][state_key][\"thoughts\"].append(evaluation)\n else:\n self.tree[\"nodes\"][\"state_key\"] = {\"thoughts\": [evaluation]}\n def solve(self, initial_prompt, num_thoughts, max_steps, pruning_threshold):\n visited_states = set()\n state_queue = PriorityQueue()\n state_queue.put((0, initial_prompt))\n for _ in range(max_steps):\n if state_queue.empty():\n break\n _, state = state_queue.get()\n if state in visited_states:\n continue\n visited_states.add(state)\n thoughts = self.model.generate_thoughts(\n state, num_thoughts, initial_prompt\n )\n evaluated_thoughts = {\n thought: self.model.evaluate_states(" + }, + { + "comment": "The code performs a search using the A* algorithm to find the highest-rated solution for a given initial prompt. It takes in a model and parameters such as the number of thoughts, maximum steps, and generates solutions based on evaluating states and generating new states. 
The best state is determined based on evaluation, and the function returns either the generated solution or the best state if no solution was found.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":265-295", + "content": " {thought: 0}, initial_prompt\n )[thought]\n for thought in thoughts\n }\n for thought, value in evaluated_thoughts.items():\n if value >= pruning_threshold:\n new_state = (\n (state, thought)\n if isinstance(state, str)\n else (*state, thought)\n )\n state_queue.put((value, new_state))\n self.log_new_state(new_state, value)\n best_state = max(visited_states, key=self.model.evaluate_states)\n solution = self.model.generate_solution(initial_prompt, best_state)\n print(f\"Highest_rated solution: {best_state} Solution: {solution}\")\n return solution if solution else best_state\n# A* search algorithm\nclass TreeofThoughtsASearch:\n def __init__(self, model):\n self.model = model\n def solve(\n self,\n initial_prompt,\n num_thoughts=5,\n max_steps=30," + }, + { + "comment": "The code initializes a graph search algorithm with priority queue, sets visited_states and dictionaries for g_scores, f_scores, and came_from. It then iterates through max_steps to find a goal state, breaks if none found, and returns reconstructed path if a goal is reached.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":296-328", + "content": " pruning_threshold=0.4,\n ):\n # the open set is implemented as a piorituve quue where the priority is -f_score\n open_set = PriorityQueue()\n open_set.put((0, 0, initial_prompt))\n # the set of visited_states\n visited_states = set()\n # the g_scores and f-scores are stored as dictionaries\n g_scores = {initial_prompt: 0}\n f_scores = {\n initial_prompt: self.model.evaluate_states(\n {initial_prompt: 0}, initial_prompt\n )[initial_prompt]\n }\n # the parent of each state is stored in a dictionary\n came_from = {}\n for _ in range(max_steps):\n if open_set.empty():\n break\n _, _, current_state = open_set.get()\n if self.is_goal(current_state, f_scores[current_state]):\n return self.reconstruct_path(\n came_from, current_state, initial_prompt\n )\n thoughts = self.model.generate_thoughts(\n current_state, num_thoughts, initial_prompt" + }, + { + "comment": "This code evaluates states in a tree-like structure by using a model to assign scores. It then selects the best state, updating g_scores and f_scores based on these scores. Finally, it reconstructs the path from the selected state back to the initial prompt.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":329-355", + "content": " )\n evaluated_thoughts = {\n thought: self.model.evaluate_states(\n {thought: 0}, initial_prompt\n )[thought]\n for thought in thoughts\n }\n for thought, value in evaluated_thoughts.items():\n if value < pruning_threshold or thought in visited_states:\n continue\n tentative_g_score = g_scores[current_state] + 1 / value\n if (\n thought not in g_scores\n or tentative_g_score < g_scores[thought]\n ):\n came_from[thought] = current_state\n g_scores[thought] = tentative_g_score\n f_scores[thought] = tentative_g_score + value\n open_set.put(\n (-f_scores[thought], g_scores[thought], thought)\n )\n return self.reconstruct_path(came_from, current_state, initial_prompt)\n def is_goal(self, state, score):" + }, + { + "comment": "The code checks if the eval state is above 0.9 and returns a boolean value. 
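The g/f-score update described above, isolated as a hedged sketch: path cost grows by 1/value, so highly rated thoughts are cheap to reach, and f = g + value drives the priority queue:

```python
def relax(came_from, g_scores, f_scores, current_state, thought, value):
    tentative_g = g_scores[current_state] + 1.0 / value  # value must be > 0
    if thought not in g_scores or tentative_g < g_scores[thought]:
        came_from[thought] = current_state
        g_scores[thought] = tentative_g
        f_scores[thought] = tentative_g + value
```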
It then reconstructs the path by traversing backwards from the current state to the initial state, and generates a solution using the model. The solution or path is returned as output.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":356-383", + "content": " # if eval state is above 0.9\n return score >= 0.9\n def reconstruct_path(self, came_from, current_state, initial_prompt):\n path = [current_state]\n while current_state in came_from:\n current_state = came_from[current_state]\n path.append(current_state)\n path.reverse()\n path = self.reconstruct_path(came_from, current_state, initial_prompt)\n solution = self.model.generate_solution(initial_prompt, path)\n print(f\"Path: {path} solution: {solution}\")\n return solution if solution else path\nclass MonteCarloTreeofThoughts(TreeofThoughts):\n def __init__(self, model, objective=\"balance\"):\n super().__init__(model)\n self.objective = objective\n self.solution_found = False\n self.tree: Dict[str, Dict[str, Union[float, Dict[str, Any]]]] = {\n \"nodes\": {},\n \"metrics\": {\"thoughts\": {}, \"evaluations\": {}},\n }\n def optimize_params(self, num_thoughts, max_steps, max_states):\n if self.objective == \"speed\":" + }, + { + "comment": "The code snippet is from the \"treeofthoughts.py\" file. It contains a function that adjusts the number of thoughts, max steps, and max states based on different objectives (\"reliability\" or \"balance\"). If the solution has been found, it reduces these values; otherwise, it increases them. The solve() function initializes parameters and calls monte_carlo_search().", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":384-415", + "content": " num_thoughts = max(1, num_thoughts - 1)\n max_steps = max(1, max_steps - 1)\n max_states = max(1, max_states - 1)\n elif self.objective == \"reliability\":\n num_thoughts += 1\n max_steps += 1\n max_states += 1\n elif self.objective == \"balanace\":\n if self.solution_found:\n num_thoughts = max(1, num_thoughts - 1)\n max_steps = max(1, max_steps - 1)\n max_states = max(1, max_states - 1)\n else:\n num_thoughts += 1\n max_steps += 1\n max_states += 1\n return num_thoughts, max_steps, max_states\n def solve(\n self,\n initial_prompt: str,\n num_thoughts: int,\n max_steps: int,\n max_states: int,\n pruning_threshold: float,\n # sleep_time: float,\n ):\n self.file_name = \"logs/tree_of_thoughts_output_montecarlo.json\"\n return self.monte_carlo_search(\n initial_prompt,\n num_thoughts," + }, + { + "comment": "Function `monte_carlo_search` performs a Monte Carlo tree search algorithm for generating thoughts from an initial prompt. It iteratively expands states, evaluates them, and updates the transposition table and state values. 
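Path reconstruction as described here, written as a plain iterative sketch (the backward walk over came_from, then a reverse):

```python
def reconstruct_path(came_from, current_state):
    path = [current_state]
    while current_state in came_from:
        current_state = came_from[current_state]
        path.append(current_state)
    path.reverse()  # initial state first, goal state last
    return path
```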
Pruning is applied based on threshold and visit counts to optimize search process.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":416-451", + "content": " max_steps,\n max_states,\n pruning_threshold,\n # sleep_time,\n )\n # v3\n def monte_carlo_search(\n self,\n initial_prompt: str,\n num_thoughts: int,\n max_steps: int,\n max_states: int,\n pruning_threshold: float,\n ):\n current_states = [initial_prompt]\n state_values = {}\n visit_counts = {initial_prompt: 0}\n transposition_table = {}\n best_state = None\n best_value = float(\"-inf\")\n for step in range(1, max_steps + 1):\n selected_states = []\n for state in current_states:\n if state in transposition_table:\n transposition_table[state]\n else:\n time.sleep(1)\n thoughts = self.model.generate_thoughts(\n state, num_thoughts, initial_prompt\n )\n time.sleep(1)\n evaluated_thoughts = self.model.evaluate_states(" + }, + { + "comment": "This code is creating a transposition table and updating visit counts. It iterates over evaluated thoughts, flattens the states, updates or adds to the transposition table, and checks the visit count. If the new state's count is greater than its parent state's count, it updates the UCB1 value by adding the thought value and the square root of the visit count.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":452-477", + "content": " thoughts, initial_prompt\n )\n for thought, value in evaluated_thoughts.items():\n flattened_state = (\n (state, thought)\n if isinstance(state, str)\n else (*state, thought)\n )\n transposition_table[flattened_state] = value\n for thought, value in evaluated_thoughts.items():\n flattened_state = (\n (state, thought)\n if isinstance(state, str)\n else (*state, thought)\n )\n if flattened_state not in visit_counts:\n visit_counts[flattened_state] = 0\n if (\n visit_counts[state] > visit_counts[flattened_state]\n and visit_counts[flattened_state] > 0\n ):\n ucb1_value = value + np.sqrt(" + }, + { + "comment": "This code is selecting states for a tree-based model. It uses UCB1 (Upper Confidence Bound for the first time) to select states and keeps track of visit counts, values, and best state. If too many states are selected, it only considers the top maximum_states. Finally, it saves the tree as JSON and generates a solution if a best state is found.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":478-502", + "content": " 2\n * np.log(visit_counts[state])\n / visit_counts[flattened_state]\n )\n if ucb1_value >= pruning_threshold:\n selected_states.append(flattened_state)\n state_values[flattened_state] = value\n # Update the best state if the current state value is greater than the best value\n if value > best_value:\n best_state = flattened_state\n best_value = value\n visit_counts[state] += 1\n if len(selected_states) > max_states:\n current_states = selected_states[:max_states]\n self.save_tree_to_json(self.file_name)\n # if best_state is not None:\n # solution = self.model.generate_solution(initial_prompt, best_state)\n # return solution\n # else:\n # solution = None" + }, + { + "comment": "This code block checks if the generated solution from the model is valid. 
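The UCB1-style score used in the selection step above, as a one-function sketch (an exploitation term plus an exploration bonus for rarely visited children; child_visits must be positive):

```python
import numpy as np

def ucb1(value, parent_visits, child_visits):
    return value + np.sqrt(2 * np.log(parent_visits) / child_visits)
```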
If it's not, it returns the best_state as the final result.", + "location": "\"/media/root/Prima/works/tree-of-thoughts/docs/src/tree_of_thoughts/treeofthoughts.py\":504-506", + "content": " # return None\n solution = self.model.generate_solution(initial_prompt, best_state)\n return solution if solution else best_state" + } + ] +} \ No newline at end of file diff --git a/docs/github-markdown.css b/docs/github-markdown.css new file mode 100755 index 0000000..96a4f29 --- /dev/null +++ b/docs/github-markdown.css @@ -0,0 +1,1197 @@ +@media (prefers-color-scheme: dark) { + + .markdown-body, + [data-theme="dark"] { + /*dark*/ + color-scheme: dark; + --color-prettylights-syntax-comment: #8b949e; + --color-prettylights-syntax-constant: #79c0ff; + --color-prettylights-syntax-entity: #d2a8ff; + --color-prettylights-syntax-storage-modifier-import: #c9d1d9; + --color-prettylights-syntax-entity-tag: #7ee787; + --color-prettylights-syntax-keyword: #ff7b72; + --color-prettylights-syntax-string: #a5d6ff; + --color-prettylights-syntax-variable: #ffa657; + --color-prettylights-syntax-brackethighlighter-unmatched: #f85149; + --color-prettylights-syntax-invalid-illegal-text: #f0f6fc; + --color-prettylights-syntax-invalid-illegal-bg: #8e1519; + --color-prettylights-syntax-carriage-return-text: #f0f6fc; + --color-prettylights-syntax-carriage-return-bg: #b62324; + --color-prettylights-syntax-string-regexp: #7ee787; + --color-prettylights-syntax-markup-list: #f2cc60; + --color-prettylights-syntax-markup-heading: #1f6feb; + --color-prettylights-syntax-markup-italic: #c9d1d9; + --color-prettylights-syntax-markup-bold: #c9d1d9; + --color-prettylights-syntax-markup-deleted-text: #ffdcd7; + --color-prettylights-syntax-markup-deleted-bg: #67060c; + --color-prettylights-syntax-markup-inserted-text: #aff5b4; + --color-prettylights-syntax-markup-inserted-bg: #033a16; + --color-prettylights-syntax-markup-changed-text: #ffdfb6; + --color-prettylights-syntax-markup-changed-bg: #5a1e02; + --color-prettylights-syntax-markup-ignored-text: #c9d1d9; + --color-prettylights-syntax-markup-ignored-bg: #1158c7; + --color-prettylights-syntax-meta-diff-range: #d2a8ff; + --color-prettylights-syntax-brackethighlighter-angle: #8b949e; + --color-prettylights-syntax-sublimelinter-gutter-mark: #484f58; + --color-prettylights-syntax-constant-other-reference-link: #a5d6ff; + --color-fg-default: #e6edf3; + --color-fg-muted: #848d97; + --color-fg-subtle: #6e7681; + --color-canvas-default: #0d1117; + --color-canvas-subtle: #161b22; + --color-border-default: #30363d; + --color-border-muted: #21262d; + --color-neutral-muted: rgba(110, 118, 129, 0.4); + --color-accent-fg: #2f81f7; + --color-accent-emphasis: #1f6feb; + --color-success-fg: #3fb950; + --color-success-emphasis: #238636; + --color-attention-fg: #d29922; + --color-attention-emphasis: #9e6a03; + --color-attention-subtle: rgba(187, 128, 9, 0.15); + --color-danger-fg: #f85149; + --color-danger-emphasis: #da3633; + --color-done-fg: #a371f7; + --color-done-emphasis: #8957e5; + } +} + +@media (prefers-color-scheme: light) { + + .markdown-body, + [data-theme="light"] { + /*light*/ + color-scheme: light; + --color-prettylights-syntax-comment: #57606a; + --color-prettylights-syntax-constant: #0550ae; + --color-prettylights-syntax-entity: #6639ba; + --color-prettylights-syntax-storage-modifier-import: #24292f; + --color-prettylights-syntax-entity-tag: #116329; + --color-prettylights-syntax-keyword: #cf222e; + --color-prettylights-syntax-string: #0a3069; + --color-prettylights-syntax-variable: 
#953800; + --color-prettylights-syntax-brackethighlighter-unmatched: #82071e; + --color-prettylights-syntax-invalid-illegal-text: #f6f8fa; + --color-prettylights-syntax-invalid-illegal-bg: #82071e; + --color-prettylights-syntax-carriage-return-text: #f6f8fa; + --color-prettylights-syntax-carriage-return-bg: #cf222e; + --color-prettylights-syntax-string-regexp: #116329; + --color-prettylights-syntax-markup-list: #3b2300; + --color-prettylights-syntax-markup-heading: #0550ae; + --color-prettylights-syntax-markup-italic: #24292f; + --color-prettylights-syntax-markup-bold: #24292f; + --color-prettylights-syntax-markup-deleted-text: #82071e; + --color-prettylights-syntax-markup-deleted-bg: #ffebe9; + --color-prettylights-syntax-markup-inserted-text: #116329; + --color-prettylights-syntax-markup-inserted-bg: #dafbe1; + --color-prettylights-syntax-markup-changed-text: #953800; + --color-prettylights-syntax-markup-changed-bg: #ffd8b5; + --color-prettylights-syntax-markup-ignored-text: #eaeef2; + --color-prettylights-syntax-markup-ignored-bg: #0550ae; + --color-prettylights-syntax-meta-diff-range: #8250df; + --color-prettylights-syntax-brackethighlighter-angle: #57606a; + --color-prettylights-syntax-sublimelinter-gutter-mark: #8c959f; + --color-prettylights-syntax-constant-other-reference-link: #0a3069; + --color-fg-default: #1F2328; + --color-fg-muted: #656d76; + --color-fg-subtle: #6e7781; + --color-canvas-default: #ffffff; + --color-canvas-subtle: #f6f8fa; + --color-border-default: #d0d7de; + --color-border-muted: hsla(210, 18%, 87%, 1); + --color-neutral-muted: rgba(175, 184, 193, 0.2); + --color-accent-fg: #0969da; + --color-accent-emphasis: #0969da; + --color-success-fg: #1a7f37; + --color-success-emphasis: #1f883d; + --color-attention-fg: #9a6700; + --color-attention-emphasis: #9a6700; + --color-attention-subtle: #fff8c5; + --color-danger-fg: #d1242f; + --color-danger-emphasis: #cf222e; + --color-done-fg: #8250df; + --color-done-emphasis: #8250df; + } +} + +.markdown-body { + -ms-text-size-adjust: 100%; + -webkit-text-size-adjust: 100%; + margin: 0; + color: var(--color-fg-default); + background-color: var(--color-canvas-default); + font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Noto Sans", Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji"; + font-size: 16px; + line-height: 1.5; + word-wrap: break-word; +} + +.markdown-body .octicon { + display: inline-block; + fill: currentColor; + vertical-align: text-bottom; +} + +.markdown-body h1:hover .anchor .octicon-link:before, +.markdown-body h2:hover .anchor .octicon-link:before, +.markdown-body h3:hover .anchor .octicon-link:before, +.markdown-body h4:hover .anchor .octicon-link:before, +.markdown-body h5:hover .anchor .octicon-link:before, +.markdown-body h6:hover .anchor .octicon-link:before { + width: 16px; + height: 16px; + content: ' '; + display: inline-block; + background-color: currentColor; + -webkit-mask-image: url("data:image/svg+xml,"); + mask-image: url("data:image/svg+xml,"); +} + +.markdown-body details, +.markdown-body figcaption, +.markdown-body figure { + display: block; +} + +.markdown-body summary { + display: list-item; +} + +.markdown-body [hidden] { + display: none !important; +} + +.markdown-body a { + background-color: transparent; + color: var(--color-accent-fg); + text-decoration: none; +} + +.markdown-body abbr[title] { + border-bottom: none; + -webkit-text-decoration: underline dotted; + text-decoration: underline dotted; +} + +.markdown-body b, +.markdown-body strong { + font-weight: 
var(--base-text-weight-semibold, 600); +} + +.markdown-body dfn { + font-style: italic; +} + +.markdown-body h1 { + margin: .67em 0; + font-weight: var(--base-text-weight-semibold, 600); + padding-bottom: .3em; + font-size: 2em; + border-bottom: 1px solid var(--color-border-muted); +} + +.markdown-body mark { + background-color: var(--color-attention-subtle); + color: var(--color-fg-default); +} + +.markdown-body small { + font-size: 90%; +} + +.markdown-body sub, +.markdown-body sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline; +} + +.markdown-body sub { + bottom: -0.25em; +} + +.markdown-body sup { + top: -0.5em; +} + +.markdown-body img { + border-style: none; + max-width: 100%; + box-sizing: content-box; + background-color: var(--color-canvas-default); +} + +.markdown-body code, +.markdown-body kbd, +.markdown-body pre, +.markdown-body samp { + font-family: monospace; + font-size: 1em; +} + +.markdown-body figure { + margin: 1em 40px; +} + +.markdown-body hr { + box-sizing: content-box; + overflow: hidden; + background: transparent; + border-bottom: 1px solid var(--color-border-muted); + height: .25em; + padding: 0; + margin: 24px 0; + background-color: var(--color-border-default); + border: 0; +} + +.markdown-body input { + font: inherit; + margin: 0; + overflow: visible; + font-family: inherit; + font-size: inherit; + line-height: inherit; +} + +.markdown-body [type=button], +.markdown-body [type=reset], +.markdown-body [type=submit] { + -webkit-appearance: button; + appearance: button; +} + +.markdown-body [type=checkbox], +.markdown-body [type=radio] { + box-sizing: border-box; + padding: 0; +} + +.markdown-body [type=number]::-webkit-inner-spin-button, +.markdown-body [type=number]::-webkit-outer-spin-button { + height: auto; +} + +.markdown-body [type=search]::-webkit-search-cancel-button, +.markdown-body [type=search]::-webkit-search-decoration { + -webkit-appearance: none; + appearance: none; +} + +.markdown-body ::-webkit-input-placeholder { + color: inherit; + opacity: .54; +} + +.markdown-body ::-webkit-file-upload-button { + -webkit-appearance: button; + appearance: button; + font: inherit; +} + +.markdown-body a:hover { + text-decoration: underline; +} + +.markdown-body ::placeholder { + color: var(--color-fg-subtle); + opacity: 1; +} + +.markdown-body hr::before { + display: table; + content: ""; +} + +.markdown-body hr::after { + display: table; + clear: both; + content: ""; +} + +.markdown-body table { + border-spacing: 0; + border-collapse: collapse; + display: block; + width: max-content; + max-width: 100%; + overflow: auto; +} + +.markdown-body td, +.markdown-body th { + padding: 0; +} + +.markdown-body details summary { + cursor: pointer; +} + +.markdown-body details:not([open])>*:not(summary) { + display: none !important; +} + +.markdown-body a:focus, +.markdown-body [role=button]:focus, +.markdown-body input[type=radio]:focus, +.markdown-body input[type=checkbox]:focus { + outline: 2px solid var(--color-accent-fg); + outline-offset: -2px; + box-shadow: none; +} + +.markdown-body a:focus:not(:focus-visible), +.markdown-body [role=button]:focus:not(:focus-visible), +.markdown-body input[type=radio]:focus:not(:focus-visible), +.markdown-body input[type=checkbox]:focus:not(:focus-visible) { + outline: solid 1px transparent; +} + +.markdown-body a:focus-visible, +.markdown-body [role=button]:focus-visible, +.markdown-body input[type=radio]:focus-visible, +.markdown-body input[type=checkbox]:focus-visible { + outline: 2px solid 
var(--color-accent-fg); + outline-offset: -2px; + box-shadow: none; +} + +.markdown-body a:not([class]):focus, +.markdown-body a:not([class]):focus-visible, +.markdown-body input[type=radio]:focus, +.markdown-body input[type=radio]:focus-visible, +.markdown-body input[type=checkbox]:focus, +.markdown-body input[type=checkbox]:focus-visible { + outline-offset: 0; +} + +.markdown-body kbd { + display: inline-block; + padding: 3px 5px; + font: 11px ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas, Liberation Mono, monospace; + line-height: 10px; + color: var(--color-fg-default); + vertical-align: middle; + background-color: var(--color-canvas-subtle); + border: solid 1px var(--color-neutral-muted); + border-bottom-color: var(--color-neutral-muted); + border-radius: 6px; + box-shadow: inset 0 -1px 0 var(--color-neutral-muted); +} + +.markdown-body h1, +.markdown-body h2, +.markdown-body h3, +.markdown-body h4, +.markdown-body h5, +.markdown-body h6 { + margin-top: 24px; + margin-bottom: 16px; + font-weight: var(--base-text-weight-semibold, 600); + line-height: 1.25; +} + +.markdown-body h2 { + font-weight: var(--base-text-weight-semibold, 600); + padding-bottom: .3em; + font-size: 1.5em; + border-bottom: 1px solid var(--color-border-muted); +} + +.markdown-body h3 { + font-weight: var(--base-text-weight-semibold, 600); + font-size: 1.25em; +} + +.markdown-body h4 { + font-weight: var(--base-text-weight-semibold, 600); + font-size: 1em; +} + +.markdown-body h5 { + font-weight: var(--base-text-weight-semibold, 600); + font-size: .875em; +} + +.markdown-body h6 { + font-weight: var(--base-text-weight-semibold, 600); + font-size: .85em; + color: var(--color-fg-muted); +} + +.markdown-body p { + margin-top: 0; + margin-bottom: 10px; +} + +.markdown-body blockquote { + margin: 0; + padding: 0 1em; + color: var(--color-fg-muted); + border-left: .25em solid var(--color-border-default); +} + +.markdown-body ul, +.markdown-body ol { + margin-top: 0; + margin-bottom: 0; + padding-left: 2em; +} + +.markdown-body ol ol, +.markdown-body ul ol { + list-style-type: lower-roman; +} + +.markdown-body ul ul ol, +.markdown-body ul ol ol, +.markdown-body ol ul ol, +.markdown-body ol ol ol { + list-style-type: lower-alpha; +} + +.markdown-body dd { + margin-left: 0; +} + +.markdown-body tt, +.markdown-body code, +.markdown-body samp { + font-family: ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas, Liberation Mono, monospace; + font-size: 12px; +} + +.markdown-body pre { + margin-top: 0; + margin-bottom: 0; + font-family: ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas, Liberation Mono, monospace; + font-size: 12px; + word-wrap: normal; +} + +.markdown-body .octicon { + display: inline-block; + overflow: visible !important; + vertical-align: text-bottom; + fill: currentColor; +} + +.markdown-body input::-webkit-outer-spin-button, +.markdown-body input::-webkit-inner-spin-button { + margin: 0; + -webkit-appearance: none; + appearance: none; +} + +.markdown-body .mr-2 { + margin-right: var(--base-size-8, 8px) !important; +} + +.markdown-body::before { + display: table; + content: ""; +} + +.markdown-body::after { + display: table; + clear: both; + content: ""; +} + +.markdown-body>*:first-child { + margin-top: 0 !important; +} + +.markdown-body>*:last-child { + margin-bottom: 0 !important; +} + +.markdown-body a:not([href]) { + color: inherit; + text-decoration: none; +} + +.markdown-body .absent { + color: var(--color-danger-fg); +} + +.markdown-body .anchor { + float: left; + padding-right: 4px; 
+ margin-left: -20px; + line-height: 1; +} + +.markdown-body .anchor:focus { + outline: none; +} + +.markdown-body p, +.markdown-body blockquote, +.markdown-body ul, +.markdown-body ol, +.markdown-body dl, +.markdown-body table, +.markdown-body pre, +.markdown-body details { + margin-top: 0; + margin-bottom: 16px; +} + +.markdown-body blockquote>:first-child { + margin-top: 0; +} + +.markdown-body blockquote>:last-child { + margin-bottom: 0; +} + +.markdown-body h1 .octicon-link, +.markdown-body h2 .octicon-link, +.markdown-body h3 .octicon-link, +.markdown-body h4 .octicon-link, +.markdown-body h5 .octicon-link, +.markdown-body h6 .octicon-link { + color: var(--color-fg-default); + vertical-align: middle; + visibility: hidden; +} + +.markdown-body h1:hover .anchor, +.markdown-body h2:hover .anchor, +.markdown-body h3:hover .anchor, +.markdown-body h4:hover .anchor, +.markdown-body h5:hover .anchor, +.markdown-body h6:hover .anchor { + text-decoration: none; +} + +.markdown-body h1:hover .anchor .octicon-link, +.markdown-body h2:hover .anchor .octicon-link, +.markdown-body h3:hover .anchor .octicon-link, +.markdown-body h4:hover .anchor .octicon-link, +.markdown-body h5:hover .anchor .octicon-link, +.markdown-body h6:hover .anchor .octicon-link { + visibility: visible; +} + +.markdown-body h1 tt, +.markdown-body h1 code, +.markdown-body h2 tt, +.markdown-body h2 code, +.markdown-body h3 tt, +.markdown-body h3 code, +.markdown-body h4 tt, +.markdown-body h4 code, +.markdown-body h5 tt, +.markdown-body h5 code, +.markdown-body h6 tt, +.markdown-body h6 code { + padding: 0 .2em; + font-size: inherit; +} + +.markdown-body summary h1, +.markdown-body summary h2, +.markdown-body summary h3, +.markdown-body summary h4, +.markdown-body summary h5, +.markdown-body summary h6 { + display: inline-block; +} + +.markdown-body summary h1 .anchor, +.markdown-body summary h2 .anchor, +.markdown-body summary h3 .anchor, +.markdown-body summary h4 .anchor, +.markdown-body summary h5 .anchor, +.markdown-body summary h6 .anchor { + margin-left: -40px; +} + +.markdown-body summary h1, +.markdown-body summary h2 { + padding-bottom: 0; + border-bottom: 0; +} + +.markdown-body ul.no-list, +.markdown-body ol.no-list { + padding: 0; + list-style-type: none; +} + +.markdown-body ol[type="a s"] { + list-style-type: lower-alpha; +} + +.markdown-body ol[type="A s"] { + list-style-type: upper-alpha; +} + +.markdown-body ol[type="i s"] { + list-style-type: lower-roman; +} + +.markdown-body ol[type="I s"] { + list-style-type: upper-roman; +} + +.markdown-body ol[type="1"] { + list-style-type: decimal; +} + +.markdown-body div>ol:not([type]) { + list-style-type: decimal; +} + +.markdown-body ul ul, +.markdown-body ul ol, +.markdown-body ol ol, +.markdown-body ol ul { + margin-top: 0; + margin-bottom: 0; +} + +.markdown-body li>p { + margin-top: 16px; +} + +.markdown-body li+li { + margin-top: .25em; +} + +.markdown-body dl { + padding: 0; +} + +.markdown-body dl dt { + padding: 0; + margin-top: 16px; + font-size: 1em; + font-style: italic; + font-weight: var(--base-text-weight-semibold, 600); +} + +.markdown-body dl dd { + padding: 0 16px; + margin-bottom: 16px; +} + +.markdown-body table th { + font-weight: var(--base-text-weight-semibold, 600); +} + +.markdown-body table th, +.markdown-body table td { + padding: 6px 13px; + border: 1px solid var(--color-border-default); +} + +.markdown-body table td>:last-child { + margin-bottom: 0; +} + +.markdown-body table tr { + background-color: var(--color-canvas-default); + 
border-top: 1px solid var(--color-border-muted); +} + +.markdown-body table tr:nth-child(2n) { + background-color: var(--color-canvas-subtle); +} + +.markdown-body table img { + background-color: transparent; +} + +.markdown-body img[align=right] { + padding-left: 20px; +} + +.markdown-body img[align=left] { + padding-right: 20px; +} + +.markdown-body .emoji { + max-width: none; + vertical-align: text-top; + background-color: transparent; +} + +.markdown-body span.frame { + display: block; + overflow: hidden; +} + +.markdown-body span.frame>span { + display: block; + float: left; + width: auto; + padding: 7px; + margin: 13px 0 0; + overflow: hidden; + border: 1px solid var(--color-border-default); +} + +.markdown-body span.frame span img { + display: block; + float: left; +} + +.markdown-body span.frame span span { + display: block; + padding: 5px 0 0; + clear: both; + color: var(--color-fg-default); +} + +.markdown-body span.align-center { + display: block; + overflow: hidden; + clear: both; +} + +.markdown-body span.align-center>span { + display: block; + margin: 13px auto 0; + overflow: hidden; + text-align: center; +} + +.markdown-body span.align-center span img { + margin: 0 auto; + text-align: center; +} + +.markdown-body span.align-right { + display: block; + overflow: hidden; + clear: both; +} + +.markdown-body span.align-right>span { + display: block; + margin: 13px 0 0; + overflow: hidden; + text-align: right; +} + +.markdown-body span.align-right span img { + margin: 0; + text-align: right; +} + +.markdown-body span.float-left { + display: block; + float: left; + margin-right: 13px; + overflow: hidden; +} + +.markdown-body span.float-left span { + margin: 13px 0 0; +} + +.markdown-body span.float-right { + display: block; + float: right; + margin-left: 13px; + overflow: hidden; +} + +.markdown-body span.float-right>span { + display: block; + margin: 13px auto 0; + overflow: hidden; + text-align: right; +} + +.markdown-body code, +.markdown-body tt { + padding: .2em .4em; + margin: 0; + font-size: 85%; + white-space: break-spaces; + background-color: var(--color-neutral-muted); + border-radius: 6px; +} + +.markdown-body code br, +.markdown-body tt br { + display: none; +} + +.markdown-body del code { + text-decoration: inherit; +} + +.markdown-body samp { + font-size: 85%; +} + +.markdown-body pre code { + font-size: 100%; +} + +.markdown-body pre>code { + padding: 0; + margin: 0; + word-break: normal; + white-space: pre; + background: transparent; + border: 0; +} + +.markdown-body .highlight { + margin-bottom: 16px; +} + +.markdown-body .highlight pre { + margin-bottom: 0; + word-break: normal; +} + +.markdown-body .highlight pre, +.markdown-body pre { + padding: 16px; + overflow: auto; + font-size: 85%; + line-height: 1.45; + color: var(--color-fg-default); + background-color: var(--color-canvas-subtle); + border-radius: 6px; +} + +.markdown-body pre code, +.markdown-body pre tt { + display: inline; + max-width: auto; + padding: 0; + margin: 0; + overflow: visible; + line-height: inherit; + word-wrap: normal; + background-color: transparent; + border: 0; +} + +.markdown-body .csv-data td, +.markdown-body .csv-data th { + padding: 5px; + overflow: hidden; + font-size: 12px; + line-height: 1; + text-align: left; + white-space: nowrap; +} + +.markdown-body .csv-data .blob-num { + padding: 10px 8px 9px; + text-align: right; + background: var(--color-canvas-default); + border: 0; +} + +.markdown-body .csv-data tr { + border-top: 0; +} + +.markdown-body .csv-data th { + font-weight: 
var(--base-text-weight-semibold, 600); + background: var(--color-canvas-subtle); + border-top: 0; +} + +.markdown-body [data-footnote-ref]::before { + content: "["; +} + +.markdown-body [data-footnote-ref]::after { + content: "]"; +} + +.markdown-body .footnotes { + font-size: 12px; + color: var(--color-fg-muted); + border-top: 1px solid var(--color-border-default); +} + +.markdown-body .footnotes ol { + padding-left: 16px; +} + +.markdown-body .footnotes ol ul { + display: inline-block; + padding-left: 16px; + margin-top: 16px; +} + +.markdown-body .footnotes li { + position: relative; +} + +.markdown-body .footnotes li:target::before { + position: absolute; + top: -8px; + right: -8px; + bottom: -8px; + left: -24px; + pointer-events: none; + content: ""; + border: 2px solid var(--color-accent-emphasis); + border-radius: 6px; +} + +.markdown-body .footnotes li:target { + color: var(--color-fg-default); +} + +.markdown-body .footnotes .data-footnote-backref g-emoji { + font-family: monospace; +} + +.markdown-body .pl-c { + color: var(--color-prettylights-syntax-comment); +} + +.markdown-body .pl-c1, +.markdown-body .pl-s .pl-v { + color: var(--color-prettylights-syntax-constant); +} + +.markdown-body .pl-e, +.markdown-body .pl-en { + color: var(--color-prettylights-syntax-entity); +} + +.markdown-body .pl-smi, +.markdown-body .pl-s .pl-s1 { + color: var(--color-prettylights-syntax-storage-modifier-import); +} + +.markdown-body .pl-ent { + color: var(--color-prettylights-syntax-entity-tag); +} + +.markdown-body .pl-k { + color: var(--color-prettylights-syntax-keyword); +} + +.markdown-body .pl-s, +.markdown-body .pl-pds, +.markdown-body .pl-s .pl-pse .pl-s1, +.markdown-body .pl-sr, +.markdown-body .pl-sr .pl-cce, +.markdown-body .pl-sr .pl-sre, +.markdown-body .pl-sr .pl-sra { + color: var(--color-prettylights-syntax-string); +} + +.markdown-body .pl-v, +.markdown-body .pl-smw { + color: var(--color-prettylights-syntax-variable); +} + +.markdown-body .pl-bu { + color: var(--color-prettylights-syntax-brackethighlighter-unmatched); +} + +.markdown-body .pl-ii { + color: var(--color-prettylights-syntax-invalid-illegal-text); + background-color: var(--color-prettylights-syntax-invalid-illegal-bg); +} + +.markdown-body .pl-c2 { + color: var(--color-prettylights-syntax-carriage-return-text); + background-color: var(--color-prettylights-syntax-carriage-return-bg); +} + +.markdown-body .pl-sr .pl-cce { + font-weight: bold; + color: var(--color-prettylights-syntax-string-regexp); +} + +.markdown-body .pl-ml { + color: var(--color-prettylights-syntax-markup-list); +} + +.markdown-body .pl-mh, +.markdown-body .pl-mh .pl-en, +.markdown-body .pl-ms { + font-weight: bold; + color: var(--color-prettylights-syntax-markup-heading); +} + +.markdown-body .pl-mi { + font-style: italic; + color: var(--color-prettylights-syntax-markup-italic); +} + +.markdown-body .pl-mb { + font-weight: bold; + color: var(--color-prettylights-syntax-markup-bold); +} + +.markdown-body .pl-md { + color: var(--color-prettylights-syntax-markup-deleted-text); + background-color: var(--color-prettylights-syntax-markup-deleted-bg); +} + +.markdown-body .pl-mi1 { + color: var(--color-prettylights-syntax-markup-inserted-text); + background-color: var(--color-prettylights-syntax-markup-inserted-bg); +} + +.markdown-body .pl-mc { + color: var(--color-prettylights-syntax-markup-changed-text); + background-color: var(--color-prettylights-syntax-markup-changed-bg); +} + +.markdown-body .pl-mi2 { + color: 
var(--color-prettylights-syntax-markup-ignored-text); + background-color: var(--color-prettylights-syntax-markup-ignored-bg); +} + +.markdown-body .pl-mdr { + font-weight: bold; + color: var(--color-prettylights-syntax-meta-diff-range); +} + +.markdown-body .pl-ba { + color: var(--color-prettylights-syntax-brackethighlighter-angle); +} + +.markdown-body .pl-sg { + color: var(--color-prettylights-syntax-sublimelinter-gutter-mark); +} + +.markdown-body .pl-corl { + text-decoration: underline; + color: var(--color-prettylights-syntax-constant-other-reference-link); +} + +.markdown-body g-emoji { + display: inline-block; + min-width: 1ch; + font-family: "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-size: 1em; + font-style: normal !important; + font-weight: var(--base-text-weight-normal, 400); + line-height: 1; + vertical-align: -0.075em; +} + +.markdown-body g-emoji img { + width: 1em; + height: 1em; +} + +.markdown-body .task-list-item { + list-style-type: none; +} + +.markdown-body .task-list-item label { + font-weight: var(--base-text-weight-normal, 400); +} + +.markdown-body .task-list-item.enabled label { + cursor: pointer; +} + +.markdown-body .task-list-item+.task-list-item { + margin-top: 4px; +} + +.markdown-body .task-list-item .handle { + display: none; +} + +.markdown-body .task-list-item-checkbox { + margin: 0 .2em .25em -1.4em; + vertical-align: middle; +} + +.markdown-body .contains-task-list:dir(rtl) .task-list-item-checkbox { + margin: 0 -1.6em .25em .2em; +} + +.markdown-body .contains-task-list { + position: relative; +} + +.markdown-body .contains-task-list:hover .task-list-item-convert-container, +.markdown-body .contains-task-list:focus-within .task-list-item-convert-container { + display: block; + width: auto; + height: 24px; + overflow: visible; + clip: auto; +} + +.markdown-body ::-webkit-calendar-picker-indicator { + filter: invert(50%); +} + +.markdown-body .markdown-alert { + padding: var(--base-size-8) var(--base-size-16); + margin-bottom: 16px; + color: inherit; + border-left: .25em solid var(--color-border-default); +} + +.markdown-body .markdown-alert>:first-child { + margin-top: 0; +} + +.markdown-body .markdown-alert>:last-child { + margin-bottom: 0; +} + +.markdown-body .markdown-alert .markdown-alert-title { + display: flex; + font-weight: var(--base-text-weight-medium, 500); + align-items: center; + line-height: 1; +} + +.markdown-body .markdown-alert.markdown-alert-note { + border-left-color: var(--color-accent-emphasis); +} + +.markdown-body .markdown-alert.markdown-alert-note .markdown-alert-title { + color: var(--color-accent-fg); +} + +.markdown-body .markdown-alert.markdown-alert-important { + border-left-color: var(--color-done-emphasis); +} + +.markdown-body .markdown-alert.markdown-alert-important .markdown-alert-title { + color: var(--color-done-fg); +} + +.markdown-body .markdown-alert.markdown-alert-warning { + border-left-color: var(--color-attention-emphasis); +} + +.markdown-body .markdown-alert.markdown-alert-warning .markdown-alert-title { + color: var(--color-attention-fg); +} + +.markdown-body .markdown-alert.markdown-alert-tip { + border-left-color: var(--color-success-emphasis); +} + +.markdown-body .markdown-alert.markdown-alert-tip .markdown-alert-title { + color: var(--color-success-fg); +} + +.markdown-body .markdown-alert.markdown-alert-caution { + border-left-color: var(--color-danger-emphasis); +} + +.markdown-body .markdown-alert.markdown-alert-caution .markdown-alert-title { + color: var(--color-danger-fg); 
+} \ No newline at end of file
diff --git a/docs/index.html b/docs/index.html
new file mode 100755
index 0000000..d1154b4
--- /dev/null
+++ b/docs/index.html
@@ -0,0 +1,1250 @@
[The 1,250 added lines of docs/index.html are a generated HTML page titled "Search Code By Comment" that renders a "Document index of:" search UI for the project; the markup was stripped during extraction, so only this placeholder is kept.]
diff --git a/docs/metadata.json b/docs/metadata.json
new file mode 100644
index 0000000..f7d32dd
--- /dev/null
+++ b/docs/metadata.json
@@ -0,0 +1,65 @@
+{
+    "url": {
+        "full": "https://github.com/kyegomez/tree-of-thoughts",
+        "partial": "kyegomez/tree-of-thoughts"
+    },
+    "file_mapping": {
+        "0": {
+            "filepath": "/README.md",
+            "entry_id": 0,
+            "language_id": "markdown"
+        },
+        "1": {
+            "filepath": "/example.py",
+            "entry_id": 14,
+            "language_id": "python"
+        },
+        "2": {
+            "filepath": "/prompts.txt",
+            "entry_id": 20,
+            "language_id": "plain-text"
+        },
+        "3": {
+            "filepath": "/pyproject.toml",
+            "entry_id": 28,
+            "language_id": "toml"
+        },
+        "4": {
+            "filepath": "/requirements.txt",
+            "entry_id": 34,
+            "language_id": "plain-text"
+        },
+        "5": {
+            "filepath": "/tree_of_thoughts/README.md",
+            "entry_id": 38,
+            "language_id": "markdown"
+        },
+        "6": {
+            "filepath": "/tree_of_thoughts/__init__.py",
+            "entry_id": 60,
+            "language_id": "python"
+        },
+        "7": {
+            "filepath": "/tree_of_thoughts/base.py",
+            "entry_id": 64,
+            "language_id": "python"
+        },
+        "8": {
+            "filepath": "/tree_of_thoughts/huggingface_model.py",
+            "entry_id": 68,
+            "language_id": "python"
+        },
+        "9": {
+            "filepath": "/tree_of_thoughts/openai_models.py",
+            "entry_id": 76,
+            "language_id": "python"
+        },
+        "10": {
+            "filepath": "/tree_of_thoughts/treeofthoughts.py",
+            "entry_id": 94,
+            "language_id": "python"
+        }
+    },
+    "project_name": "tree-of-thoughts",
+    "split_count": 2
+}
\ No newline at end of file
diff --git a/docs/metadata_title.json b/docs/metadata_title.json
new file mode 100644
index 0000000..0103dd2
--- /dev/null
+++ b/docs/metadata_title.json
@@ -0,0 +1 @@
+{"split_count": 1}
\ No newline at end of file
diff --git a/docs/sitemap.xml b/docs/sitemap.xml
new file mode 100644
index 0000000..f0cf557
--- /dev/null
+++ b/docs/sitemap.xml
@@ -0,0 +1,79 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
+
+  <url>
+    <loc>https://james4ever0.github.io/tree-of-thoughts?q=/README.md</loc>
+    <lastmod>2023-12-28T09:21:02+00:00</lastmod>
+    <priority>1.00</priority>
+  </url>
+
+  <url>
+    <loc>https://james4ever0.github.io/tree-of-thoughts?q=/example.py</loc>
+    <lastmod>2023-12-28T09:21:02+00:00</lastmod>
+    <priority>1.00</priority>
+  </url>
+
+  <url>
+    <loc>https://james4ever0.github.io/tree-of-thoughts?q=/prompts.txt</loc>
+    <lastmod>2023-12-28T09:21:02+00:00</lastmod>
+    <priority>1.00</priority>
+  </url>
+
+  <url>
+    <loc>https://james4ever0.github.io/tree-of-thoughts?q=/pyproject.toml</loc>
+    <lastmod>2023-12-28T09:21:02+00:00</lastmod>
+    <priority>1.00</priority>
+  </url>
+
+  <url>
+    <loc>https://james4ever0.github.io/tree-of-thoughts?q=/requirements.txt</loc>
+    <lastmod>2023-12-28T09:21:02+00:00</lastmod>
+    <priority>1.00</priority>
+  </url>
+
+  <url>
+    <loc>https://james4ever0.github.io/tree-of-thoughts?q=/tree_of_thoughts/README.md</loc>
+    <lastmod>2023-12-28T09:21:02+00:00</lastmod>
+    <priority>1.00</priority>
+  </url>
+
+  <url>
+    <loc>https://james4ever0.github.io/tree-of-thoughts?q=/tree_of_thoughts/__init__.py</loc>
+    <lastmod>2023-12-28T09:21:02+00:00</lastmod>
+    <priority>1.00</priority>
+  </url>
+
+  <url>
+    <loc>https://james4ever0.github.io/tree-of-thoughts?q=/tree_of_thoughts/base.py</loc>
+    <lastmod>2023-12-28T09:21:02+00:00</lastmod>
+    <priority>1.00</priority>
+  </url>
+
+  <url>
+    <loc>https://james4ever0.github.io/tree-of-thoughts?q=/tree_of_thoughts/huggingface_model.py</loc>
+    <lastmod>2023-12-28T09:21:02+00:00</lastmod>
+    <priority>1.00</priority>
+  </url>
+
+  <url>
+    <loc>https://james4ever0.github.io/tree-of-thoughts?q=/tree_of_thoughts/openai_models.py</loc>
+    <lastmod>2023-12-28T09:21:02+00:00</lastmod>
+    <priority>1.00</priority>
+  </url>
+
+  <url>
+    <loc>https://james4ever0.github.io/tree-of-thoughts?q=/tree_of_thoughts/treeofthoughts.py</loc>
+    <lastmod>2023-12-28T09:21:02+00:00</lastmod>
+    <priority>1.00</priority>
+  </url>
+
+  <url>
+    <loc>https://james4ever0.github.io/tree-of-thoughts/tree.html?full=true</loc>
+    <lastmod>2023-12-28T09:21:02+00:00</lastmod>
+    <priority>1.00</priority>
+  </url>
+
+</urlset>
\ No newline at end of file
diff --git a/docs/src/README.md b/docs/src/README.md
new file mode 100644
index 0000000..8404077
--- /dev/null
+++ b/docs/src/README.md
@@ -0,0 +1,162 @@
+[![Multi-Modality](agorabanner.png)](https://discord.gg/qUtxnK2NMf)
+
+![Tree of
Thoughts Banner](treeofthoughts.png) + +![Discord](https://img.shields.io/discord/999382051935506503) +[![Twitter](https://img.shields.io/twitter/url?style=social&url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts)](https://twitter.com/intent/tweet?text=Check%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts!%20https://github.com/kyegomez/tree-of-thoughts) +[![LinkedIn](https://img.shields.io/badge/Share-LinkedIn-blue?style=social&logo=linkedin)](https://www.linkedin.com/sharing/share-offsite/?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts) +[![Facebook](https://img.shields.io/badge/Share-Facebook-blue?style=social&logo=facebook)](https://www.facebook.com/sharer/sharer.php?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts) +[![Reddit](https://img.shields.io/badge/Share-Reddit-orange?style=social&logo=reddit)](https://www.reddit.com/submit?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts&title=Check%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts%21) +[![Hacker News](https://img.shields.io/badge/Share-Hacker%20News-orange?style=social&logo=y-combinator)](https://news.ycombinator.com/submitlink?u=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts&t=Check%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts%21) +[![Pinterest](https://img.shields.io/badge/Share-Pinterest-red?style=social&logo=pinterest)](https://pinterest.com/pin/create/button/?url=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts&media=https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts%2Fraw%2Fmain%2Ftree-of-thoughts.jpeg&description=Check%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts%21) +[![WhatsApp](https://img.shields.io/badge/Share-WhatsApp-green?style=social&logo=whatsapp)](https://api.whatsapp.com/send?text=Check%20out%20this%20amazing%20project%20on%20improving%20AI%20reasoning%20-%20Tree%20of%20Thoughts%21%20https%3A%2F%2Fgithub.com%2Fkyegomez%2Ftree-of-thoughts) + + +[Paper link](https://arxiv.org/pdf/2305.10601.pdf) +[Author's implementation](https://github.com/princeton-nlp/tree-of-thought-llm) + +## Introduction + +Tree of Thoughts (ToT) is a powerful and flexible algorithm that significantly advances model reasoning by up to 70%. This plug-and-play version allows you to connect your own models and experience superintelligence! 
+
+
+## Install
+
+```bash
+pip install tree-of-thoughts
+```
+
+## Usage
+```python
+import os
+from tree_of_thoughts.openai_models import OpenAILanguageModel
+from tree_of_thoughts.treeofthoughts import MonteCarloTreeofThoughts
+from dotenv import load_dotenv
+
+load_dotenv()
+
+
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the OpenAILanguageModel class with the API key
+model = OpenAILanguageModel(api_key=api_key)
+
+
+# Initialize the MonteCarloTreeofThoughts class with the model
+tree_of_thoughts = MonteCarloTreeofThoughts(model)
+
+# Define the initial prompt
+initial_prompt = """
+
+
+Input: 2 8 8 14
+Possible next steps:
+2 + 8 = 10 (left: 8 10 14)
+8 / 2 = 4 (left: 4 8 14)
+14 + 2 = 16 (left: 8 8 16)
+2 * 8 = 16 (left: 8 14 16)
+8 - 2 = 6 (left: 6 8 14)
+14 - 8 = 6 (left: 2 6 8)
+14 / 2 = 7 (left: 7 8 8)
+14 - 2 = 12 (left: 8 8 12)
+Input: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation
+Possible next steps:
+"""
+
+# Define the number of thoughts to generate
+num_thoughts = 1
+max_steps = 3
+max_states = 4
+pruning_threshold = 0.5
+
+
+# Generate the thoughts
+solution = tree_of_thoughts.solve(
+    initial_prompt=initial_prompt,
+    num_thoughts=num_thoughts,
+    max_steps=max_steps,
+    max_states=max_states,
+    pruning_threshold=pruning_threshold,
+    # sleep_time=sleep_time
+)
+
+print(f"Solution: {solution}")
+
+```
+
+
+### ToT with HF LLM
+
+To run Hugging Face Transformers with Tree of Thoughts:
+```python
+from tree_of_thoughts import TreeofThoughts, HuggingLanguageModel, MonteCarloTreeofThoughts
+
+model_name="01-ai/Yi-34B"
+
+model = HuggingLanguageModel(model_name,
+    model_tokenizer=model_name,
+    verbose=True)
+
+
+# Initialize the MonteCarloTreeofThoughts class with the model
+tree_of_thoughts = MonteCarloTreeofThoughts(model)
+
+# Note: to reproduce the same results as the Tree of Thoughts paper, if not better,
+# craft a 1-shot chain-of-thought prompt for your task below
+
+initial_prompt = """
+
+
+Input: 2 8 8 14
+Possible next steps:
+2 + 8 = 10 (left: 8 10 14)
+8 / 2 = 4 (left: 4 8 14)
+14 + 2 = 16 (left: 8 8 16)
+2 * 8 = 16 (left: 8 14 16)
+8 - 2 = 6 (left: 6 8 14)
+14 - 8 = 6 (left: 2 6 8)
+14 / 2 = 7 (left: 7 8 8)
+14 - 2 = 12 (left: 8 8 12)
+Input: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation
+Possible next steps:
+
+
+
+"""
+num_thoughts = 1
+max_steps = 3
+max_states = 4
+pruning_threshold = 0.5
+
+
+
+
+solution = tree_of_thoughts.solve(
+    initial_prompt=initial_prompt,
+    num_thoughts=num_thoughts,
+    max_steps=max_steps,
+    max_states=max_states,
+    pruning_threshold=pruning_threshold,
+    # sleep_time=sleep_time
+)
+
+print(f"Solution: {solution}")
+```
+
+### Basic Prompts
+- Copy and paste this into your llm!
+
+```
+"Three experts with exceptional logical thinking skills are collaboratively answering a question using the tree of thoughts method. Each expert will share their thought process in detail, taking into account the previous thoughts of others and admitting any errors. They will iteratively refine and expand upon each other's ideas, giving credit where it's due. The process continues until a conclusive answer is found. Organize the entire response in a markdown table format. The task is:
+```
+
+
+
+# Acknowledgements
+
+Thanks to Shunyu Yao (Princeton University), Dian Yu (Google DeepMind), Jeffrey Zhao (Google DeepMind), Izhak Shafran (Google DeepMind), Thomas L.
Griffiths (Princeton University), Yuan Cao (Google DeepMind), and Karthik Narasimhan (Princeton University) for sharing this amazing work with the world!
+
+And thanks to Phil Wang (Lucidrains) for inspiring me to devote myself to open-source AI research.
+
+# License
+Apache
\ No newline at end of file
diff --git a/docs/src/example.py b/docs/src/example.py
new file mode 100644
index 0000000..ad847fa
--- /dev/null
+++ b/docs/src/example.py
@@ -0,0 +1,53 @@
+import os
+from tree_of_thoughts.openai_models import OpenAILanguageModel
+from tree_of_thoughts.treeofthoughts import MonteCarloTreeofThoughts
+from dotenv import load_dotenv
+
+load_dotenv()
+
+
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the OpenAILanguageModel class with the API key
+model = OpenAILanguageModel(api_key=api_key)
+
+
+# Initialize the MonteCarloTreeofThoughts class with the model
+tree_of_thoughts = MonteCarloTreeofThoughts(model)
+
+# Define the initial prompt
+initial_prompt = """
+
+
+Input: 2 8 8 14
+Possible next steps:
+2 + 8 = 10 (left: 8 10 14)
+8 / 2 = 4 (left: 4 8 14)
+14 + 2 = 16 (left: 8 8 16)
+2 * 8 = 16 (left: 8 14 16)
+8 - 2 = 6 (left: 6 8 14)
+14 - 8 = 6 (left: 2 6 8)
+14 / 2 = 7 (left: 7 8 8)
+14 - 2 = 12 (left: 8 8 12)
+Input: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation
+Possible next steps:
+"""
+
+# Define the number of thoughts to generate
+num_thoughts = 1
+max_steps = 3
+max_states = 4
+pruning_threshold = 0.5
+
+
+# Generate the thoughts
+solution = tree_of_thoughts.solve(
+    initial_prompt=initial_prompt,
+    num_thoughts=num_thoughts,
+    max_steps=max_steps,
+    max_states=max_states,
+    pruning_threshold=pruning_threshold,
+    # sleep_time=sleep_time
+)
+
+print(f"Solution: {solution}")
diff --git a/docs/src/prompts.txt b/docs/src/prompts.txt
new file mode 100644
index 0000000..87fa70a
--- /dev/null
+++ b/docs/src/prompts.txt
@@ -0,0 +1,9 @@
+Imagine three different experts are answering this question. All experts will write down 1 step of their thinking, then share it with the group. Then all experts will go on to the next step, etc. If any expert realises they're wrong at any point then they leave. The question is...
+
+Simulate three brilliant, logical experts collaboratively answering a question. Each one verbosely explains their thought process in real-time, considering the prior explanations of others and openly acknowledging mistakes. At each step, whenever possible, each expert refines and builds upon the thoughts of others, acknowledging their contributions. They continue until there is a definitive answer to the question. For clarity, your entire response should be in a markdown table. The question is...
+
+Imagine three highly intelligent experts working together to answer a question. They will follow a tree of thoughts approach, where each expert shares their thought process step by step. They will consider the input from others, refine their thoughts, and build upon the group's collective knowledge. If an expert realizes their thought is incorrect, they will acknowledge it and withdraw from the discussion. Continue this process until a definitive answer is reached. Present the entire response in a markdown table. The question is...
+
+Three experts with exceptional logical thinking skills are collaboratively answering a question using a tree of thoughts method. Each expert will share their thought process in detail, taking into account the previous thoughts of others and admitting any errors.
They will iteratively refine and expand upon each other's ideas, giving credit where it's due. The process continues until a conclusive answer is found. Organize the entire response in a markdown table format. The question is... + +Envision a group of three experts working in unison to tackle a question by employing a tree of thoughts strategy. Each expert will thoroughly explain their line of thinking at every step, while also considering the insights provided by their peers. They will openly recognize any mistakes and build upon the group's shared understanding. This iterative process will continue until a definitive solution is reached. Structure the entire response as a markdown table. The question is... \ No newline at end of file diff --git a/docs/src/pyproject.toml b/docs/src/pyproject.toml new file mode 100644 index 0000000..f7702f2 --- /dev/null +++ b/docs/src/pyproject.toml @@ -0,0 +1,54 @@ +[tool.poetry] +name = "tree-of-thoughts" +version = "0.3.9" +description = "Tree of Thoughts - Pytorch" +authors = ["Kye Gomez "] +license = "MIT" +readme = "README.md" # Assuming you have a README.md file +homepage = "https://github.com/kyegomez/tree-of-thoughts" +keywords = ["artificial intelligence", "deep learning", "optimizers", "Prompt Engineering"] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3.6", +] + +[tool.poetry.dependencies] +python = "^3.6" +transformers = "*" +swarms = "*" + +[tool.poetry.dev-dependencies] + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" + + +[tool.poetry.group.lint.dependencies] +ruff = "^0.0.249" +types-toml = "^0.10.8.1" +types-redis = "^4.3.21.6" +types-pytz = "^2023.3.0.0" +black = "^23.1.0" +types-chardet = "^5.0.4.6" +mypy-protobuf = "^3.0.0" + + +[tool.autopep8] +max_line_length = 80 +ignore = "E501,W6" # or ["E501", "W6"] +in-place = true +recursive = true +aggressive = 3 + +[tool.ruff] +line-length = 80 + +[tool.black] +line-length = 80 +target-version = ['py38'] +preview = true + diff --git a/docs/src/requirements.txt b/docs/src/requirements.txt new file mode 100644 index 0000000..8467660 --- /dev/null +++ b/docs/src/requirements.txt @@ -0,0 +1,4 @@ +transformers +openai +langchain +swarms diff --git a/docs/src/tree_of_thoughts/README.md b/docs/src/tree_of_thoughts/README.md new file mode 100644 index 0000000..d8507f8 --- /dev/null +++ b/docs/src/tree_of_thoughts/README.md @@ -0,0 +1,169 @@ +# Comprehensive Documentation and Changelog +This document provides a comprehensive overview of the changes made to the TreeofThoughts class and its methods to improve readability and understandability. The changes include updating variable names to be more meaningful and descriptive, as well as modifying the structure of the code for better readability. + +## Changelog +1. TreeofThoughts Class +Updated the class definition to include a more descriptive docstring. +2. __init__ Method +No changes were made to the __init__ method. +3. solve Method +Updated variable names: +x -> initial_prompt +k -> num_thoughts +T -> max_steps +b -> max_states +vth -> value_threshold +4. tot_bfs Method +Updated variable names: +x -> initial_prompt +k -> num_thoughts +T -> max_steps +b -> max_states +S0 -> current_states +S0_t -> generated_states +Vt -> state_values +St -> selected_states +5. 
tot_dfs Method +Updated variable names: + +x -> initial_prompt +k -> num_thoughts +T -> max_steps +vth -> value_threshold +s -> state +t -> step +s_prime -> next_state +child -> child_state + +### Added optional parameters for better control over the search process: +pruning_threshold +confidence_threshold +max_iterations +convergence_threshold +convergence_count +6. save_tree_to_json Method +No changes were made to the save_tree_to_json method. +7. print_tree Method +No changes were made to the print_tree method. + +# Documentation +TreeofThoughts Class +The TreeofThoughts class is designed to solve problems using a tree-based search algorithm. It takes a model and a search algorithm (either 'BFS' or 'DFS') as input and provides methods to solve problems using the chosen algorithm. + +## Initialization +The __init__ method initializes the TreeofThoughts class with the given model and search algorithm. It also initializes an empty tree structure to store the search results. + +## Solve Method +The solve method is the main entry point for solving problems using the TreeofThoughts class. It takes the following parameters: + +initial_prompt: The initial problem or prompt to be solved. +num_thoughts: The number of thoughts to generate at each step. +max_steps: The maximum number of steps to perform in the search. +max_states: The maximum number of states to consider at each step (for BFS). +value_threshold: The threshold value for pruning states (for DFS). +timeout: The maximum time allowed for the search process. +confidence_threshold: The confidence threshold for stopping the search. +max_iterations: The maximum number of iterations allowed for the search. +convergence_threshold: The threshold for determining convergence. +convergence_count: The number of consecutive convergences required to stop the search. +Based on the chosen search algorithm, the solve method calls either the tot_bfs or tot_dfs method to perform the search. + +## tot_bfs Method +The tot_bfs method performs a breadth-first search to solve the problem. It takes the following parameters: + +initial_prompt: The initial problem or prompt to be solved. +num_thoughts: The number of thoughts to generate at each step. +max_steps: The maximum number of steps to perform in the search. +max_states: The maximum number of states to consider at each step. +pruning_threshold: The threshold value for pruning states. +The method generates and evaluates states at each step, selecting the best states based on their values. The search continues until the maximum number of steps is reached, and the best state is returned. + +## tot_dfs Method +The tot_dfs method performs a depth-first search to solve the problem. It takes the following parameters: + +initial_prompt: The initial problem or prompt to be solved. +num_thoughts: The number of thoughts to generate at each step. +max_steps: The maximum number of steps to perform in the search. + +value_threshold: The threshold value for pruning states. +pruning_threshold: The threshold value for pruning states based on their values. +confidence_threshold: The confidence threshold for stopping the search. +max_iterations: The maximum number of iterations allowed for the search. +convergence_threshold: The threshold for determining convergence. +convergence_count: The number of consecutive convergences required to stop the search. +The method uses a recursive depth-first search approach to explore the state space. 
It generates and evaluates states at each step, and if a state's value is above the value_threshold and pruning_threshold, it continues the search with the new state. The search stops when the maximum number of steps is reached, the confidence threshold is met, or the convergence criteria are satisfied. The best state is then returned.
+
+## save_tree_to_json Method
+The save_tree_to_json method saves the current tree structure and metrics to a JSON file. It takes the following parameter:
+
+- `file_name`: The name of the JSON file to save the tree structure and metrics.
+
+This method is useful for logging the search process and analyzing the results later.
+
+## print_tree Method
+The print_tree method prints the tree structure in a human-readable format. It takes the following parameters:
+
+- `node`: The current node in the tree.
+- `depth`: The depth of the current node in the tree (default is 0).
+
+This method is useful for visualizing the tree structure and understanding the search process.
+
+## Usage
+To use the TreeofThoughts class, follow these steps:
+
+1. Initialize the class with a model and a search algorithm (either 'BFS' or 'DFS').
+2. Call the solve method with the required parameters to perform the search and obtain the best state.
+3. (Optional) Use the save_tree_to_json method to save the tree structure and metrics to a JSON file.
+4. (Optional) Use the print_tree method to visualize the tree structure.
+
+An example of how to use the TreeofThoughts classes is shown after the V2 class reference below.
+
+
+# V2 with Monte Carlo, A* Search Algorithm, BFS, Best First Search
+### Class: TreeofThoughts
+This class represents the base class for the Tree of Thoughts search algorithm. It contains the following methods:
+
+- `__init__(self, model)`: Initializes the TreeofThoughts object with the given model.
+- `save_tree_to_json(self, file_name)`: Saves the tree to a JSON file with the given file name.
+- `logNewState(self, state, evaluation)`: Logs a new state and its evaluation to the tree.
+- `adjust_pruning_threshold_precentile(self, evaluated_thoughts, percentile)`: Adjusts the pruning threshold based on the percentile of evaluated thoughts.
+- `adjust_pruning_threshold_moving_average(self, evaluated_thoughts, window_size)`: Adjusts the pruning threshold based on the moving average of evaluated thoughts.
+
+### Class: TreeofThoughtsBFS
+This class represents the Breadth-First Search (BFS) variant of the Tree of Thoughts search algorithm. It inherits from the TreeofThoughts class and contains the following method:
+
+- `solve(self, initial_prompt, num_thoughts, max_steps, max_states, value_threshold, pruning_threshold=0.5)`: Solves the problem using BFS with the given parameters.
+
+### Class: TreeofThoughtsDFS
+This class represents the Depth-First Search (DFS) variant of the Tree of Thoughts search algorithm. It inherits from the TreeofThoughts class and contains the following method:
+
+- `solve(self, initial_prompt, num_thoughts, max_steps, value_threshold, pruning_threshold=0.5)`: Solves the problem using DFS with the given parameters.
+
+### Class: TreeofThoughtsBEST
+This class represents the Best-First Search variant of the Tree of Thoughts search algorithm. It contains the following methods:
+
+- `__init__(self, model)`: Initializes the TreeofThoughtsBEST object with the given model.
+- `save_tree_to_json(self, file_name)`: Saves the tree to a JSON file with the given file name.
+- `log_new_state(self, state, evaluation)`: Logs a new state and its evaluation to the tree.
+- `solve(self, initial_prompt, num_thoughts, max_steps, pruning_threshold)`: Solves the problem using Best-First Search with the given parameters.
+
+### Class: TreeofThoughtsASearch
+This class represents the A* Search variant of the Tree of Thoughts search algorithm. It contains the following methods:
+
+- `__init__(self, model)`: Initializes the TreeofThoughtsASearch object with the given model.
+- `solve(self, initial_prompt, num_thoughts=5, max_steps=30, pruning_threshold=0.4)`: Solves the problem using A* Search with the given parameters.
+- `is_goal(self, state, score)`: Determines if the given state is a goal state based on its score.
+- `reconstruct_path(self, came_from, current_state, initial_prompt)`: Reconstructs the path from the initial state to the current state using the came_from dictionary.
+
+### Class: MonteCarloTreeofThoughts
+This class represents the Monte Carlo Tree Search variant of the Tree of Thoughts search algorithm. It inherits from the TreeofThoughts class and contains the following methods:
+
+- `__init__(self, model, objective="balance")`: Initializes the MonteCarloTreeofThoughts object with the given model and objective.
+- `optimize_params(self, num_thoughts, max_steps, max_states)`: Optimizes the search parameters based on the objective.
+- `solve(self, initial_prompt, num_thoughts, max_steps, max_states, pruning_threshold)`: Solves the problem using Monte Carlo Tree Search with the given parameters.
+- `monte_carlo_search(self, initial_prompt, num_thoughts, max_steps, max_states, pruning_threshold)`: Performs the Monte Carlo Tree Search with the given parameters.
+
+### Class: OptimizedTreeofThoughts
+This class represents an optimized version of the Tree of Thoughts search algorithm. It inherits from the TreeofThoughts class and contains the following method:
+
+- `solve(self, x, k=None, T=None, b=None, vth=None, timeout=None, confidence_threshold=None, max_iterations=None, convergence_threshold=None, convergence_count=None)`: Solves the problem using an optimized search algorithm with the given parameters.
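+
+### Example Usage
+Here is a minimal sketch of driving the search classes end to end, as promised in the Usage section above. The `TreeofThoughtsBFS` variant is used; the `OPENAI_API_KEY` lookup, the prompt, and all parameter values are illustrative, and any model exposing `generate_thoughts`, `evaluate_states`, and `generate_solution` can stand in for `OpenAILanguageModel`:
+
+```python
+import os
+
+from tree_of_thoughts.openai_models import OpenAILanguageModel
+from tree_of_thoughts.treeofthoughts import TreeofThoughtsBFS
+
+# Any object exposing generate_thoughts, evaluate_states, and
+# generate_solution can be plugged in instead of OpenAILanguageModel.
+model = OpenAILanguageModel(api_key=os.environ.get("OPENAI_API_KEY"))
+
+tree = TreeofThoughtsBFS(model)
+
+solution = tree.solve(
+    initial_prompt=(
+        "Use the numbers 2, 8, 8, 14 and basic arithmetic operations"
+        " (+-*/) to obtain 24."
+    ),
+    num_thoughts=2,       # thoughts generated per state
+    max_steps=3,          # depth of the search
+    max_states=4,         # beam width kept after each step
+    value_threshold=0.5,  # minimum evaluation for a state to be kept
+)
+print(f"Solution: {solution}")
+
+# Optionally persist the logged search tree for later inspection.
+tree.save_tree_to_json("./logs/tree.json")
+```
+
+The DFS, Best-First, A*, and Monte Carlo variants are driven the same way; only the `solve` signatures listed above differ.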
\ No newline at end of file
diff --git a/docs/src/tree_of_thoughts/__init__.py b/docs/src/tree_of_thoughts/__init__.py
new file mode 100644
index 0000000..9efeb16
--- /dev/null
+++ b/docs/src/tree_of_thoughts/__init__.py
@@ -0,0 +1,27 @@
+from tree_of_thoughts.base import AbstractLanguageModel
+
+# from tree_of_thoughts.huggingface_model import (
+#     HuggingLanguageModel,
+# )
+from tree_of_thoughts.openai_models import (
+    OpenAILanguageModel,
+)
+from tree_of_thoughts.treeofthoughts import (
+    MonteCarloTreeofThoughts,
+    TreeofThoughts,
+    TreeofThoughtsASearch,
+    TreeofThoughtsBEST,
+    TreeofThoughtsBFS,
+    TreeofThoughtsDFS,
+)
+
+__all__ = [
+    "OpenAILanguageModel",
+    "TreeofThoughts",
+    "MonteCarloTreeofThoughts",
+    "TreeofThoughtsBFS",
+    "TreeofThoughtsDFS",
+    "TreeofThoughtsBEST",
+    "TreeofThoughtsASearch",
+    "AbstractLanguageModel",
+    # "HuggingLanguageModel",  # cannot be re-exported while its import above is commented out
+]
diff --git a/docs/src/tree_of_thoughts/base.py b/docs/src/tree_of_thoughts/base.py
new file mode 100644
index 0000000..6b5f316
--- /dev/null
+++ b/docs/src/tree_of_thoughts/base.py
@@ -0,0 +1,11 @@
+from abc import ABC, abstractmethod
+
+
+class AbstractLanguageModel(ABC):
+    @abstractmethod
+    def generate_thoughts(self, state, k):
+        pass
+
+    @abstractmethod
+    def evaluate_states(self, states):
+        pass
diff --git a/docs/src/tree_of_thoughts/huggingface_model.py b/docs/src/tree_of_thoughts/huggingface_model.py
new file mode 100644
index 0000000..c5e3e4c
--- /dev/null
+++ b/docs/src/tree_of_thoughts/huggingface_model.py
@@ -0,0 +1,64 @@
+from swarms.models import HuggingfaceLLM
+
+
+class HuggingLanguageModel:
+    def __init__(
+        self, model_name, model_tokenizer=None, verbose=False, *args, **kwargs
+    ):
+        self.model = HuggingfaceLLM(model_name, *args, **kwargs)
+        self.verbose = verbose
+
+    def generate_thoughts(self, state, k, max_length=100):
+        # join only when the state is a sequence of thoughts
+        state_text = state if isinstance(state, str) else " ".join(state)
+        prompt = (
+            "Write down your observations in format 'Observation:xxxx', then"
+            " write down your thoughts in format 'Thoughts:xxxx'. Given the"
+            f" current state of reasoning: '{state_text}', generate"
+            f" {k} coherent solutions to achieve {state_text}"
+        )
+
+        if self.verbose:
+            print(f"Generating thoughts for state: {state_text}")
+
+        thoughts = []
+        try:
+            # capture the model output; previously the result was discarded
+            thoughts = self.model.run(prompt)
+        except Exception as e:
+            if self.verbose:
+                print(f"Error generating thoughts for state: {state_text}")
+                print(f"Error: {e}")
+
+        return thoughts
+
+    def evaluate_states(self, states, initial_prompt, max_length=10):
+        state_values = {}
+        for state in states:
+            state_text = state if isinstance(state, str) else " ".join(state)
+            prompt = (
+                f"Given the current state of reasoning: '{state_text}',"
+                " pessimistically evaluate its value as a float between 0 and"
+                f" 1 based on its potential to achieve {initial_prompt}"
+            )
+
+            if self.verbose:
+                print(f"Evaluating state: {state_text}")
+
+            try:
+                value_text = self.model(prompt)
+                value = float(value_text)
+            except ValueError:
+                if self.verbose:
+                    print(
+                        "Error converting value to float for state:"
+                        f" {state_text}"
+                    )
+                value = 0  # Assign a default value if the conversion fails
+            except Exception as e:
+                if self.verbose:
+                    print(f"Error evaluating state: {state_text}")
+                    print(f"Error: {e}")
+                value = 0
+
+            state_values[state] = value
+
+        return state_values
diff --git a/docs/src/tree_of_thoughts/openai_models.py b/docs/src/tree_of_thoughts/openai_models.py
new file mode 100644
index 0000000..138aed2
--- /dev/null
+++ b/docs/src/tree_of_thoughts/openai_models.py
@@ -0,0 +1,186 @@
+import logging
+from tree_of_thoughts.base import AbstractLanguageModel
+from
swarms.models import OpenAIChat
+
+# Logging
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+)
+logger = logging.getLogger(__name__)
+
+
+class OpenAILanguageModel(AbstractLanguageModel):
+    """
+    OpenAI Language Model
+
+    Args:
+        api_key (str): OpenAI API key
+        strategy (str): Strategy for generating thoughts. Choose from 'cot' (Chain of Thoughts) or 'gpt' (GPT-3)
+        evaluation_strategy (str): Strategy for evaluating thoughts. Choose from 'value' or 'vote'
+        api_base (str): Base path for OpenAI API
+        api_model (str): Model name for OpenAI API
+        enable_ReAct_prompting (bool): Enable ReAct prompting
+
+    Examples:
+        >>> from tree_of_thoughts.openai_models import OpenAILanguageModel
+        >>> model = OpenAILanguageModel(api_key=api_key)
+        >>> model.generate_thoughts(state, k)
+        >>> model.evaluate_states(states, initial_prompt)
+    """
+
+    def __init__(
+        self,
+        api_key,
+        strategy="cot",
+        evaluation_strategy="value",
+        enable_ReAct_prompting=True,
+        *args,
+        **kwargs,
+    ):
+        self.api_key = api_key
+        self.use_chat_api = True
+        self.enable_ReAct_prompting = enable_ReAct_prompting
+        self.strategy = strategy
+        self.evaluation_strategy = evaluation_strategy
+
+        # reference: https://www.promptingguide.ai/techniques/react
+        self.ReAct_prompt = ""
+        if enable_ReAct_prompting:
+            self.ReAct_prompt = (
+                "Write down your observations in format 'Observation:xxxx',"
+                " then write down your thoughts in format 'Thoughts:xxxx'."
+            )
+
+        self.model = OpenAIChat(openai_api_key=api_key, *args, **kwargs)
+
+    def generate_text(self, prompt: str, k: int = 3):
+        """Generate k completions for the prompt using the OpenAI chat API."""
+        if self.use_chat_api:
+            thoughts = []
+            for _ in range(k):
+                response = self.model(prompt)
+                thoughts += [response]
+            return thoughts
+
+    def generate_thoughts(
+        self, state, k, initial_prompt, rejected_solutions=None
+    ):
+        """
+        Generate thoughts from state using OpenAI API
+
+        Args:
+            state (str or list): State of reasoning
+            k (int): Number of thoughts to generate
+            initial_prompt (str): Initial prompt
+            rejected_solutions (list): List of rejected solutions
+
+        Returns:
+            list: List of thoughts
+        """
+        if isinstance(state, str):
+            state_text = state
+        else:
+            state_text = "\n".join(state)
+        print("New state generating thought:", state, "\n\n")
+        prompt = f"""You are TreeofThoughts, a superintelligent AI model devoted to helping humans by any means necessary. Your purpose is to generate a series of solutions that comply with the user's instructions; you must generate solutions on the basis of determining the most reliable solution in the shortest amount of time, while taking rejected solutions into account and learning from them.
+        Considering the reasoning provided:\n\n
+        ###'{state_text}'\n\n###
+        Devise the best possible solution for the task: {initial_prompt}. Here are evaluated solutions that were rejected:
+        ###{rejected_solutions}###.
+        Complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them."""
+
+        prompt += self.ReAct_prompt
+        thoughts = self.generate_text(prompt, k)
+        return thoughts
+
+    def generate_solution(self, initial_prompt, state, rejected_solutions=None):
+        try:
+            if isinstance(state, list):
+                state_text = "\n".join(state)
+            else:
+                state_text = state
+
+            prompt = f"""You are TreeofThoughts, a superintelligent AI model devoted to helping humans by any means necessary. Your purpose is to generate a series of solutions that comply with the user's instructions; you must generate solutions on the basis of determining the most reliable solution in the shortest amount of time, while taking rejected solutions into account and learning from them.
+            Considering the reasoning provided:\n\n
+            ###'{state_text}'\n\n###
+            Devise the best possible solution for the task: {initial_prompt}. Here are evaluated solutions that were rejected:
+            ###{rejected_solutions}###.
+            Complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them."""
+            answer = self.generate_text(prompt, 1)
+            print(f"Generated solution: {answer}")
+            return answer
+        except Exception as e:
+            logger.error(f"Error in generate_solution: {e}")
+            return None
+
+    def evaluate_states(self, states, initial_prompt):
+        if not states:
+            return {}
+
+        if self.evaluation_strategy == "value":
+            state_values = {}
+            for state in states:
+                if isinstance(state, str):
+                    state_text = state
+                else:
+                    state_text = "\n".join(state)
+                print(
+                    "We receive a state of type",
+                    type(state),
+                    "For state: ",
+                    state,
+                    "\n\n",
+                )
+                prompt = f"""To achieve the following goal: '{initial_prompt}', pessimistically value the context of the past solutions and, more importantly, the latest generated solution AS A FLOAT BETWEEN 0 AND 1\n
+                Past solutions:\n\n
+                {state_text}\n
+                If the solution is not directly making fast, concrete progress towards achieving the goal, give it a lower score.
+                Evaluate all solutions AS A FLOAT BETWEEN 0 and 1. DO NOT RETURN ANYTHING ELSE
+                """
+
+                # the openai_api_call_handler/openai_choice2text_handler
+                # helpers were never defined in this class; query the chat
+                # model through generate_text instead
+                try:
+                    value_text = self.generate_text(prompt, 1)[0]
+                    value = float(value_text)
+                    print(f"Evaluated Thought Value: {value}")
+                except ValueError:
+                    value = 0  # Assign a default value if the conversion fails
+                state_values[state] = value
+            return state_values
+
+        elif self.evaluation_strategy == "vote":
+            # states may be plain strings or tuples of thoughts
+            states_text = "\n".join(
+                state if isinstance(state, str) else " ".join(state)
+                for state in states
+            )
+            prompt = (
+                "Given the following states of reasoning, vote for the best"
+                " state, utilizing a scalar value"
+                f" 1-10:\n{states_text}\n\nVote on the probability of this"
+                f" state of reasoning achieving {initial_prompt}, be very"
+                " pessimistic, and return NOTHING ELSE"
+            )
+            best_state_text = self.generate_text(prompt, 1)[0]
+            print(f"Best state text: {best_state_text}")
+            best_state = tuple(best_state_text.split())
+            print(f"best_state: {best_state}")
+
+            return {state: 1 if state == best_state else 0 for state in states}
+
+        else:
+            raise ValueError(
+                "Invalid evaluation strategy. Choose 'value' or 'vote'."
+            )
diff --git a/docs/src/tree_of_thoughts/treeofthoughts.py b/docs/src/tree_of_thoughts/treeofthoughts.py
new file mode 100644
index 0000000..1a54008
--- /dev/null
+++ b/docs/src/tree_of_thoughts/treeofthoughts.py
@@ -0,0 +1,507 @@
+import concurrent.futures
+import json
+import logging
+import os
+import time
+from queue import PriorityQueue
+from typing import Any, Dict, Union
+
+import numpy as np
+
+
+DATA_PATH = "./data"
+
+
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+)
+logger = logging.getLogger(__name__)
+
+
+class TreeofThoughts:
+    def __init__(self, model):
+        self.model = model
+        self.tree: Dict[str, Dict[str, Union[float, Dict[str, Any]]]] = {
+            "nodes": {},
+        }
+        self.best_state = None
+        self.best_value = float("-inf")
+        self.history = []  # initialize the history log
+
+    def save_tree_to_json(self, file_name):
+        os.makedirs(os.path.dirname(file_name), exist_ok=True)
+        with open(file_name, "w") as json_file:
+            json.dump(self.tree, json_file, indent=4)
+
+    def logNewState(self, state, evaluation):
+        if not isinstance(state, str):
+            state = " | ".join(state)
+        if state in self.tree["nodes"]:
+            self.tree["nodes"][state]["thoughts"].append(evaluation)
+        else:
+            self.tree["nodes"][state] = {"thoughts": [evaluation]}
+
+    def adjust_pruning_threshold_precentile(
+        self, evaluated_thoughts, percentile
+    ):
+        values = np.array(list(evaluated_thoughts.values()))
+        if values.size == 0:
+            return 0
+        return max(np.percentile(values, percentile), 0.1)
+
+    def adjust_pruning_threshold_moving_average(
+        self, evaluated_thoughts, window_size
+    ):
+        values = list(evaluated_thoughts.values())
+        if len(values) < window_size:
+            return np.mean(values) if values else 0
+        else:
+            return max(np.mean(values[-window_size:]), 0.1)
+
+
+######################
+
+
+class TreeofThoughtsBFS(TreeofThoughts):
+    def solve(
+        self,
+        initial_prompt,
+        num_thoughts,
+        max_steps,
+        max_states,
+        value_threshold,
+        pruning_threshold=0.5,
+    ):
+        current_states = [initial_prompt]
+        state_values = {}
+        dynamic_pruning_threshold = pruning_threshold
+
+        try:
+            with concurrent.futures.ThreadPoolExecutor() as executor:
+                for step in range(1, max_steps + 1):
+                    selected_states = []
+                    for state in current_states:
+                        thoughts = self.model.generate_thoughts(
+                            state, num_thoughts, initial_prompt
+                        )
+                        futures = [
+                            executor.submit(
+                                self.model.evaluate_states,
+                                {thought: 0},
+                                initial_prompt,
+                            )
+                            for thought in thoughts
+                        ]
+                        concurrent.futures.wait(futures)
+                        # evaluate_states returns a {state: value} dict, so
+                        # extract each thought's numeric score from its result
+                        evaluated_thoughts = {}
+                        for thought, fut in zip(thoughts, futures):
+                            result = fut.result()
+                            if isinstance(result, dict) and isinstance(
+                                result.get(thought), (int, float)
+                            ):
+                                evaluated_thoughts[thought] = result[thought]
+
+                        # only adjust the threshold when there are scores
+                        if evaluated_thoughts:
+                            dynamic_pruning_threshold = (
+                                self.adjust_pruning_threshold_moving_average(
+                                    evaluated_thoughts, 5
+                                )
+                            )
+
+                        for thought, value in evaluated_thoughts.items():
+                            flattened_state = (
+                                (state, thought)
+                                if isinstance(state, str)
+                                else (*state, thought)
+                            )
+                            selected_states.append((flattened_state, value))
+
+                    selected_states.sort(key=lambda x: x[1], reverse=True)
+                    selected_states = selected_states[
+                        :max_states
+                    ]  # Select only the top states
+
+                    for state, value in selected_states:
+                        if value >= dynamic_pruning_threshold:
+                            state_values[state] = value
+                            self.logNewState(state, value)
+                            logger.debug(f"State Values: {state_values}")
+
+                    # advance the frontier so the next step expands the newly
+                    # selected states instead of re-expanding the initial prompt
+                    current_states = [s for s, _ in selected_states]
+
+
+######################
+
+
+class TreeofThoughtsBFS(TreeofThoughts):
+    def solve(
+        self,
+        initial_prompt,
+        num_thoughts,
+        max_steps,
+        max_states,
+        value_threshold,
+        pruning_threshold=0.5,
+    ):
+        current_states = [initial_prompt]
+        state_values = {}
+        dynamic_pruning_threshold = pruning_threshold
+
+        try:
+            with concurrent.futures.ThreadPoolExecutor() as executor:
+                for step in range(1, max_steps + 1):
+                    selected_states = []
+                    for state in current_states:
+                        thoughts = self.model.generate_thoughts(
+                            state, num_thoughts, initial_prompt
+                        )
+                        futures = [
+                            executor.submit(
+                                self.model.evaluate_states,
+                                {thought: 0},
+                                initial_prompt,
+                            )
+                            for thought in thoughts
+                        ]
+                        concurrent.futures.wait(futures)
+                        # evaluate_states returns a dict, so pull out the
+                        # numeric score for each thought and skip any result
+                        # that is not a number
+                        evaluated_thoughts = {
+                            thought: fut.result()[thought]
+                            for thought, fut in zip(thoughts, futures)
+                            if isinstance(
+                                fut.result().get(thought), (int, float)
+                            )
+                        }
+
+                        if (
+                            evaluated_thoughts
+                        ):  # only adjust if you have evaluated thoughts
+                            dynamic_pruning_threshold = (
+                                self.adjust_pruning_threshold_moving_average(
+                                    evaluated_thoughts, 5
+                                )
+                            )
+
+                        for thought, value in evaluated_thoughts.items():
+                            flattened_state = (
+                                (state, thought)
+                                if isinstance(state, str)
+                                else (*state, thought)
+                            )
+                            selected_states.append((flattened_state, value))
+
+                    selected_states.sort(key=lambda x: x[1], reverse=True)
+                    selected_states = selected_states[
+                        :max_states
+                    ]  # Select only the top states
+
+                    for state, value in selected_states:
+                        if value >= dynamic_pruning_threshold:
+                            state_values[state] = value
+                            self.logNewState(state, value)
+                            logger.debug(f"State Values: {state_values}")
+
+            if state_values:
+                highest_rated_solution = max(
+                    state_values.items(), key=lambda x: x[1]
+                )
+                highest_rated_state = highest_rated_solution[0]
+                solution = self.model.generate_solution(
+                    initial_prompt, highest_rated_state
+                )
+                print(
+                    f"Highest rated solution: {highest_rated_solution}"
+                    f" Solution: {solution}"
+                )
+
+                return solution if solution else highest_rated_state
+
+            else:
+                return None
+
+        except Exception as e:
+            logger.error(f"Error in tot_bfs: {e}")
+            return None
+
+
+###########
+
+
+class TreeofThoughtsDFS(TreeofThoughts):
+    def solve(
+        self,
+        initial_prompt,
+        num_thoughts,
+        max_steps,
+        value_threshold,
+        pruning_threshold=0.5,
+    ):
+        output = []
+
+        def dfs(state, step):
+            nonlocal output
+            if step > max_steps:
+                thought = self.model.generate_thoughts(state, 1, initial_prompt)
+                value = self.model.evaluate_states({state: 0}, initial_prompt)[
+                    state
+                ]
+                output.append((thought, value))
+                return
+
+            thoughts = self.model.generate_thoughts(
+                state, num_thoughts, initial_prompt
+            )
+            evaluated_thoughts = self.model.evaluate_states(
+                {thought: 0 for thought in thoughts}, initial_prompt
+            )
+            filtered_thoughts = [
+                thought
+                for thought in thoughts
+                if evaluated_thoughts[thought] >= pruning_threshold
+            ]
+
+            for next_state in filtered_thoughts:
+                state_value = self.model.evaluate_states(
+                    {next_state: 0}, initial_prompt
+                )[next_state]
+
+                if state_value > value_threshold:
+                    child = (
+                        (state, next_state)
+                        if isinstance(state, str)
+                        else (*state, next_state)
+                    )
+                    dfs(child, step + 1)
+
+        try:
+            dfs(initial_prompt, 1)
+            best_state, _ = max(output, key=lambda x: x[1])
+            solution = self.model.generate_solution(initial_prompt, best_state)
+            return solution if solution else best_state
+        except Exception as e:
+            logger.error(f"Error in tot_dfs: {e}")
+            return None
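+
+# Sketch of the DFS control flow above (illustrative numbers, not from the
+# original source): with num_thoughts=2, pruning_threshold=0.5 and
+# value_threshold=0.6, a node whose two candidate thoughts score 0.4 and 0.7
+# keeps only the 0.7 thought and recurses into it because 0.7 > 0.6; the 0.4
+# branch is pruned without ever being expanded. Leaves reached at
+# step > max_steps are collected in `output`, and the best-scoring one is
+# handed to generate_solution.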
+
+
+# v2 => best first search => explores the state space ordered by the quality
+# of the states (priority queue / greedy best-first search)
+class TreeofThoughtsBEST:
+    def __init__(self, model):
+        self.model = model
+        self.tree = {"nodes": {}}
+
+    def save_tree_to_json(self, file_name):
+        os.makedirs(os.path.dirname(file_name), exist_ok=True)
+        with open(file_name, "w") as json_file:
+            json.dump(self.tree, json_file, indent=4)
+
+    def log_new_state(self, state, evaluation):
+        state_key = " | ".join(state) if isinstance(state, tuple) else state
+        if state_key in self.tree["nodes"]:
+            self.tree["nodes"][state_key]["thoughts"].append(evaluation)
+        else:
+            self.tree["nodes"][state_key] = {"thoughts": [evaluation]}
+
+    def solve(self, initial_prompt, num_thoughts, max_steps, pruning_threshold):
+        visited_states = set()
+        state_queue = PriorityQueue()
+
+        state_queue.put((0, initial_prompt))
+
+        for _ in range(max_steps):
+            if state_queue.empty():
+                break
+
+            _, state = state_queue.get()
+
+            if state in visited_states:
+                continue
+
+            visited_states.add(state)
+
+            thoughts = self.model.generate_thoughts(
+                state, num_thoughts, initial_prompt
+            )
+            evaluated_thoughts = {
+                thought: self.model.evaluate_states(
+                    {thought: 0}, initial_prompt
+                )[thought]
+                for thought in thoughts
+            }
+
+            for thought, value in evaluated_thoughts.items():
+                if value >= pruning_threshold:
+                    new_state = (
+                        (state, thought)
+                        if isinstance(state, str)
+                        else (*state, thought)
+                    )
+                    # negate the value so the min-queue pops the
+                    # highest-scoring state first
+                    state_queue.put((-value, new_state))
+                    self.log_new_state(new_state, value)
+
+        best_state = max(
+            visited_states,
+            key=lambda s: self.model.evaluate_states({s: 0}, initial_prompt)[s],
+        )
+        solution = self.model.generate_solution(initial_prompt, best_state)
+        print(f"Highest rated solution: {best_state} Solution: {solution}")
+        return solution if solution else best_state
+
+
+# A* search algorithm
+class TreeofThoughtsASearch:
+    def __init__(self, model):
+        self.model = model
+
+    def solve(
+        self,
+        initial_prompt,
+        num_thoughts=5,
+        max_steps=30,
+        pruning_threshold=0.4,
+    ):
+        # the open set is implemented as a priority queue where the priority
+        # is -f_score
+        open_set = PriorityQueue()
+        open_set.put((0, 0, initial_prompt))
+
+        # the set of visited states
+        visited_states = set()
+
+        # the g_scores and f_scores are stored as dictionaries
+        g_scores = {initial_prompt: 0}
+        f_scores = {
+            initial_prompt: self.model.evaluate_states(
+                {initial_prompt: 0}, initial_prompt
+            )[initial_prompt]
+        }
+
+        # the parent of each state is stored in a dictionary
+        came_from = {}
+
+        current_state = initial_prompt
+        for _ in range(max_steps):
+            if open_set.empty():
+                break
+
+            _, _, current_state = open_set.get()
+            # mark the popped state as visited so it is not re-expanded
+            # as a thought later
+            visited_states.add(current_state)
+
+            if self.is_goal(current_state, f_scores[current_state]):
+                return self.reconstruct_path(
+                    came_from, current_state, initial_prompt
+                )
+
+            thoughts = self.model.generate_thoughts(
+                current_state, num_thoughts, initial_prompt
+            )
+            evaluated_thoughts = {
+                thought: self.model.evaluate_states(
+                    {thought: 0}, initial_prompt
+                )[thought]
+                for thought in thoughts
+            }
+
+            for thought, value in evaluated_thoughts.items():
+                if value < pruning_threshold or thought in visited_states:
+                    continue
+
+                tentative_g_score = g_scores[current_state] + 1 / value
+                if (
+                    thought not in g_scores
+                    or tentative_g_score < g_scores[thought]
+                ):
+                    came_from[thought] = current_state
+                    g_scores[thought] = tentative_g_score
+                    f_scores[thought] = tentative_g_score + value
+                    open_set.put(
+                        (-f_scores[thought], g_scores[thought], thought)
+                    )
+
+        return self.reconstruct_path(came_from, current_state, initial_prompt)
+
+    def is_goal(self, state, score):
+        # a state counts as a goal once its evaluation is at least 0.9
+        return score >= 0.9
+
+    def reconstruct_path(self, came_from, current_state, initial_prompt):
+        path = [current_state]
+        while current_state in came_from:
+            current_state = came_from[current_state]
+            path.append(current_state)
+        path.reverse()
+
+        solution = self.model.generate_solution(initial_prompt, path)
+        print(f"Path: {path} solution: {solution}")
+        return solution if solution else path
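+
+# How the A* scores above fit together (a worked example with assumed
+# numbers, not from the original source): the edge cost is 1 / value, so
+# moving through a thought scored 0.8 costs 1.25 while one scored 0.4 costs
+# 2.5; g is the accumulated cost from the root, and f = g + value uses the
+# evaluation itself as the heuristic. Starting from the root (g = 0), a
+# thought valued 0.8 gets g = 1.25 and f = 1.25 + 0.8 = 2.05, and is pushed
+# with priority -2.05 so the PriorityQueue pops the highest f first. The
+# search stops early once is_goal sees an evaluation >= 0.9.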
+
+
+class MonteCarloTreeofThoughts(TreeofThoughts):
+    def __init__(self, model, objective="balance"):
+        super().__init__(model)
+        self.objective = objective
+        self.solution_found = False
+        self.tree: Dict[str, Dict[str, Union[float, Dict[str, Any]]]] = {
+            "nodes": {},
+            "metrics": {"thoughts": {}, "evaluations": {}},
+        }
+
+    def optimize_params(self, num_thoughts, max_steps, max_states):
+        if self.objective == "speed":
+            num_thoughts = max(1, num_thoughts - 1)
+            max_steps = max(1, max_steps - 1)
+            max_states = max(1, max_states - 1)
+        elif self.objective == "reliability":
+            num_thoughts += 1
+            max_steps += 1
+            max_states += 1
+        elif self.objective == "balance":
+            if self.solution_found:
+                num_thoughts = max(1, num_thoughts - 1)
+                max_steps = max(1, max_steps - 1)
+                max_states = max(1, max_states - 1)
+            else:
+                num_thoughts += 1
+                max_steps += 1
+                max_states += 1
+
+        return num_thoughts, max_steps, max_states
+
+    def solve(
+        self,
+        initial_prompt: str,
+        num_thoughts: int,
+        max_steps: int,
+        max_states: int,
+        pruning_threshold: float,
+        # sleep_time: float,
+    ):
+        self.file_name = "logs/tree_of_thoughts_output_montecarlo.json"
+        return self.monte_carlo_search(
+            initial_prompt,
+            num_thoughts,
+            max_steps,
+            max_states,
+            pruning_threshold,
+            # sleep_time,
+        )
+
+    # v3
+    def monte_carlo_search(
+        self,
+        initial_prompt: str,
+        num_thoughts: int,
+        max_steps: int,
+        max_states: int,
+        pruning_threshold: float,
+    ):
+        current_states = [initial_prompt]
+        state_values = {}
+        visit_counts = {initial_prompt: 0}
+        transposition_table = {}
+
+        best_state = None
+        best_value = float("-inf")
+
+        for step in range(1, max_steps + 1):
+            selected_states = []
+
+            for state in current_states:
+                if state in transposition_table:
+                    # reuse cached evaluations for previously seen states
+                    evaluated_thoughts = transposition_table[state]
+                else:
+                    time.sleep(1)
+                    thoughts = self.model.generate_thoughts(
+                        state, num_thoughts, initial_prompt
+                    )
+                    time.sleep(1)
+                    evaluated_thoughts = self.model.evaluate_states(
+                        thoughts, initial_prompt
+                    )
+                    transposition_table[state] = evaluated_thoughts
+
+                for thought, value in evaluated_thoughts.items():
+                    flattened_state = (
+                        (state, thought)
+                        if isinstance(state, str)
+                        else (*state, thought)
+                    )
+
+                    if flattened_state not in visit_counts:
+                        visit_counts[flattened_state] = 0
+
+                    if (
+                        visit_counts[state] > visit_counts[flattened_state]
+                        and visit_counts[flattened_state] > 0
+                    ):
+                        # UCB1: balance the exploitation term (value) against
+                        # an exploration bonus that shrinks as the child is
+                        # visited more often relative to its parent
+                        ucb1_value = value + np.sqrt(
+                            2
+                            * np.log(visit_counts[state])
+                            / visit_counts[flattened_state]
+                        )
+
+                        if ucb1_value >= pruning_threshold:
+                            selected_states.append(flattened_state)
+                            state_values[flattened_state] = value
+
+                            # Update the best state if the current state value
+                            # is greater than the best value
+                            if value > best_value:
+                                best_state = flattened_state
+                                best_value = value
+
+                visit_counts[state] += 1
+
+            if len(selected_states) > max_states:
+                current_states = selected_states[:max_states]
+            self.save_tree_to_json(self.file_name)
+
+        solution = self.model.generate_solution(initial_prompt, best_state)
+        return solution if solution else best_state
diff --git a/docs/tree.html b/docs/tree.html
new file mode 100644
index 0000000..0589644
--- /dev/null
+++ b/docs/tree.html
@@ -0,0 +1,138 @@
+Project structure of: kyegomez/tree-of-thoughts
+(The page's HTML markup, styles, and scripts are omitted here; only the rendered outline is preserved.)
+• tree-of-thoughts: Integrates AI-powered NLP & search.
+    • example.py: Monte Carlo Tree of Thoughts solves problems with OpenAI.
+    • prompts.txt: Collaborative, iterative question-answering using markdown tables.
+    • pyproject.toml: Configures project details, dependencies, and linting with Ruff.
+    • README.md: Tree-of-thoughts AI reasoning tool with OpenAI.
+    • requirements.txt: Imported libraries for NLP, text generation, language models, and distributed computing.
+    • tree_of_thoughts: Advanced search algorithms for problem-solving.
+        • __init__.py: Imports language models, thought trees, and search algorithms.
+        • base.py: Abstract Base Class for language model generation and evaluation.
+        • huggingface_model.py: Huggingface model generates solutions with error handling.
+        • openai_models.py: OpenAI Chat API language model generation.
+        • README.md: Updated Tree of Thoughts class with search algorithms and optimizations.
+        • treeofthoughts.py: Navigates problem-solving with multi-search algorithms.
\ No newline at end of file
diff --git a/tree_of_thoughts/__init__.py b/tree_of_thoughts/__init__.py
index d4bec01..9efeb16 100644
--- a/tree_of_thoughts/__init__.py
+++ b/tree_of_thoughts/__init__.py
@@ -1,7 +1,7 @@
 from tree_of_thoughts.base import AbstractLanguageModel
-from tree_of_thoughts.huggingface_model import (
-    HuggingLanguageModel,
-)
+# from tree_of_thoughts.huggingface_model import (
+#     HuggingLanguageModel,
+# )
 from tree_of_thoughts.openai_models import (
     OpenAILanguageModel,
 )
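With the Hugging Face import commented out, the package exposes only the OpenAI-backed model by default. A minimal usage sketch under that assumption (the api_key argument and the example prompt are illustrative, not taken from the original files; the solve parameters follow the MonteCarloTreeofThoughts signature above):

    from tree_of_thoughts.openai_models import OpenAILanguageModel
    from tree_of_thoughts.treeofthoughts import MonteCarloTreeofThoughts

    model = OpenAILanguageModel(api_key="sk-...")  # hypothetical key
    tot = MonteCarloTreeofThoughts(model, objective="balance")
    solution = tot.solve(
        initial_prompt="Use the numbers 2, 4, 6, 8 and arithmetic to reach 24",
        num_thoughts=3,
        max_steps=3,
        max_states=4,
        pruning_threshold=0.5,
    )
    print(solution)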