Skip to content

Commit aef1507

Browse files
Merge branch 'main' into gitauto/add-issue-templates-5ccb93b2-15d3-4362-b2eb-be15d92e8f20
2 parents a1be781 + 90849a1 commit aef1507

File tree

6 files changed

+365
-100
lines changed

6 files changed

+365
-100
lines changed

.github/workflows/codeql.yml

Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,92 @@
1+
# CodeQL Advanced security-scanning workflow.
#
# For most projects this file needs no changes; simply commit it to the
# repository. You may alter it to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# The languages below were auto-detected. Please check the `language`
# matrix to confirm it lists the correct set of supported CodeQL languages.
name: "CodeQL Advanced"

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]
  schedule:
    - cron: '31 17 * * 1'

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    # Runner size impacts CodeQL analysis time. To learn more, please see:
    # - https://gh.io/recommended-hardware-resources-for-running-codeql
    # - https://gh.io/supported-runners-and-hardware-resources
    # - https://gh.io/using-larger-runners (GitHub.com only)
    # Consider using larger runners or machines with greater resources for
    # possible analysis-time improvements.
    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
    permissions:
      # required for all workflows
      security-events: write

      # required to fetch internal or private CodeQL packs
      packages: read

      # only required for workflows in private repositories
      actions: read
      contents: read

    strategy:
      fail-fast: false
      matrix:
        include:
          - language: python
            build-mode: none
    # CodeQL supports the following values for 'language': 'c-cpp', 'csharp',
    # 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'.
    # Use 'c-cpp' to analyze code written in C, C++ or both.
    # Use 'java-kotlin' to analyze code written in Java, Kotlin or both.
    # Use 'javascript-typescript' to analyze code written in JavaScript,
    # TypeScript or both.
    # To learn more about changing the languages that are analyzed or
    # customizing the build mode for your analysis, see
    # https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
    # If you are analyzing a compiled language, you can modify the
    # 'build-mode' for that language to customize how your codebase is
    # analyzed; see
    # https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          # If you wish to specify custom queries, you can do so here or in a
          # config file. By default, queries listed here will override any
          # specified in a config file. Prefix the list here with "+" to use
          # these queries and those in the config file.

          # For more details on CodeQL's query packs, refer to:
          # https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # If the analyze step fails for one of the languages you are analyzing
      # with "We were unable to automatically build your code", modify the
      # matrix above to set the build mode to "manual" for that language,
      # then modify this step to build your code.
      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
      - if: matrix.build-mode == 'manual'
        shell: bash
        run: |
          echo 'If you are using a "manual" build mode for one or more of the' \
            'languages you are analyzing, replace this with the commands to build' \
            'your code, for example:'
          echo '  make bootstrap'
          echo '  make release'
          exit 1

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
        with:
          category: "/language:${{matrix.language}}"

Bash

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,7 @@ uvicorn app:app --host 0.0.0.0 --port 8000 &
1616
# **Optional:** Expose Server using ngrok (for Development)
1717
echo "### Exposing Server with ngrok (Comment out if not needed)..."
1818
# pip install pyngrok
19-
# from pyngrok import ngrok
20-
# public_url = ngrok.connect(8000)
21-
# echo "Public URL: ${public_url}"
19+
# python -c "from pyngrok import ngrok; public_url = ngrok.connect(8000); print('Public URL:', public_url)"
2220

2321
# **Section 3: Production Setup with SSL (Comment out for Development)**
2422
# echo "### Generating SSL Certificates (e.g., using Certbot)..."
@@ -49,9 +47,9 @@ curl -X POST "http://127.0.0.1:8000/speech-to-text/" -F "file=@path/to/audio.wav
4947
# **Section 6: Environment Management (Optional)**
5048
# echo "### Creating a Virtual Environment (Comment out if already set)..."
5149
# python -m venv path/to/venv
52-
# Activate based on your OS (Manual Step)
53-
# Windows: path\to\venv\Scripts\activate
54-
# Unix/Linux: source path/to/venv/bin/activate
50+
# # Activate based on your OS (Manual Step)
51+
# # Windows: path\to\venv\Scripts\activate
52+
# # Unix/Linux: source path/to/venv/bin/activate
5553

5654
# echo "### Installing Additional Tools (e.g., for Arch Linux)..."
5755
# sudo pacman -S python-pipx python-torch

JavaScript

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
/**
 * POSTs a fixed NLP request to the FastAPI server and renders the
 * `response` field of the JSON reply into the #response element.
 * On any network, HTTP, or parse failure a generic error message is
 * shown instead.
 *
 * SECURITY: a real-looking bearer token was previously hard-coded on the
 * Authorization header and committed to the repository. That token must be
 * treated as leaked and rotated on the server. Never embed credentials in
 * client-side source; inject them at deploy time instead.
 */
async function fetchData() {
  // Token is expected to be provided by the hosting page (e.g. server-side
  // templating or a build-time substitution) — never committed to VCS.
  const token = window.API_TOKEN || '';
  try {
    const response = await fetch('https://your-fastapi-server.com/process-nlp/', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${token}`
      },
      body: JSON.stringify({ text: "Hello FastAPI" })
    });
    // Treat non-2xx statuses as failures instead of parsing an error body.
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}`);
    }
    const data = await response.json();
    document.getElementById('response').innerText = data.response;
  } catch (error) {
    document.getElementById('response').innerText = 'Error fetching data';
  }
}

fetchData();

agi_pipeline.ipynb

Lines changed: 62 additions & 94 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
"colab": {
66
"private_outputs": true,
77
"provenance": [],
8-
"authorship_tag": "ABX9TyPJuZcVawsmdIfuIMLuRx2y",
8+
"authorship_tag": "ABX9TyPoHH519BuqGSnR/HON75UP",
99
"include_colab_link": true
1010
},
1111
"kernelspec": {
@@ -24,20 +24,23 @@
2424
"colab_type": "text"
2525
},
2626
"source": [
27-
"<a href=\"https://colab.research.google.com/github/OneFineStarstuff/AGI-Pipeline/blob/main/agi_pipeline.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
27+
"<a href=\"https://colab.research.google.com/github/OneFineStarstuff/OneFineStarstuff.github.io/blob/main/agi_pipeline.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
2828
]
2929
},
3030
{
3131
"cell_type": "code",
3232
"source": [
33+
"# === Imports ===\n",
3334
"import os\n",
3435
"import asyncio\n",
36+
"import time\n",
37+
"from typing import List\n",
3538
"import torch\n",
3639
"from transformers import T5Tokenizer, T5ForConditionalGeneration\n",
3740
"from PIL import Image\n",
38-
"from fastapi import FastAPI, UploadFile, Depends, HTTPException\n",
41+
"from fastapi import FastAPI, UploadFile, Depends, HTTPException, Request\n",
3942
"from fastapi.security import OAuth2PasswordBearer\n",
40-
"from pydantic import BaseModel\n",
43+
"from pydantic import BaseModel, SecretStr\n",
4144
"import whisper\n",
4245
"from ultralytics import YOLO\n",
4346
"import pyttsx3\n",
@@ -48,100 +51,74 @@
4851
"\n",
4952
"# === Logging Setup ===\n",
5053
"logger.add(\"pipeline_{time}.log\", rotation=\"1 MB\", level=\"DEBUG\", enqueue=True, backtrace=True, diagnose=True)\n",
54+
"logger.info(\"Application startup\")\n",
5155
"\n",
52-
"# === Environment Variables and Authentication ===\n",
53-
"SECURE_TOKEN = os.getenv(\"SECURE_TOKEN\", \"my_secure_token\")\n",
56+
"# === Security Enhancement: Environment Variable for Secure Token ===\n",
57+
"SECURE_TOKEN = SecretStr(os.getenv(\"SECURE_TOKEN\", \"change-me\"))  # NOTE(review): the previous hard-coded default was a real bearer token committed to VCS — it is leaked and must be rotated; never commit real secrets as fallbacks\n",
58+
"\n",
59+
"# === OAuth2PasswordBearer for Authentication ===\n",
5460
"oauth2_scheme = OAuth2PasswordBearer(tokenUrl=\"token\")\n",
5561
"\n",
62+
"# === Authentication Function ===\n",
5663
"def authenticate_user(token: str = Depends(oauth2_scheme)):\n",
57-
" if token != SECURE_TOKEN:\n",
64+
" if token != SECURE_TOKEN.get_secret_value():\n",
5865
" logger.warning(\"Authentication failed.\")\n",
5966
" raise HTTPException(status_code=401, detail=\"Invalid token\")\n",
6067
"\n",
61-
"# === Request and Response Models ===\n",
68+
"# === Request and Response Models (Pydantic) ===\n",
6269
"class TextRequest(BaseModel):\n",
6370
" text: str\n",
6471
"\n",
6572
"class TextResponse(BaseModel):\n",
6673
" response: str\n",
6774
"\n",
68-
"# === NLP Module ===\n",
75+
"# === NLP Module (T5 Transformer) ===\n",
6976
"class NLPModule:\n",
7077
" def __init__(self):\n",
7178
" model_name = \"google/flan-t5-small\"\n",
72-
" try:\n",
73-
" self.tokenizer = T5Tokenizer.from_pretrained(model_name)\n",
74-
" self.model = T5ForConditionalGeneration.from_pretrained(model_name)\n",
75-
" logger.info(\"NLP model loaded successfully.\")\n",
76-
" except Exception as e:\n",
77-
" logger.error(f\"Failed to load NLP model: {e}\")\n",
78-
" raise RuntimeError(\"Failed to load NLP model.\")\n",
79+
" self.tokenizer = T5Tokenizer.from_pretrained(model_name)\n",
80+
" self.model = T5ForConditionalGeneration.from_pretrained(model_name)\n",
81+
" logger.info(\"NLP model loaded successfully.\")\n",
7982
"\n",
8083
" def generate_text(self, prompt: str) -> str:\n",
8184
" if not prompt.strip():\n",
8285
" raise ValueError(\"Prompt cannot be empty.\")\n",
8386
" logger.debug(f\"Generating text for prompt: {prompt}\")\n",
84-
" try:\n",
85-
" inputs = self.tokenizer(prompt, return_tensors=\"pt\")\n",
86-
" outputs = self.model.generate(inputs[\"input_ids\"], max_length=100)\n",
87-
" response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
88-
" logger.info(f\"Generated response: {response}\")\n",
89-
" return response\n",
90-
" except Exception as e:\n",
91-
" logger.error(f\"Error in text generation: {e}\")\n",
92-
" raise RuntimeError(\"Text generation failed.\")\n",
93-
"\n",
94-
"# === CV Module with Object Detection ===\n",
87+
" inputs = self.tokenizer(prompt, return_tensors=\"pt\")\n",
88+
" outputs = self.model.generate(inputs[\"input_ids\"], max_length=100)\n",
89+
" response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)\n",
90+
" logger.info(f\"Generated response: {response}\")\n",
91+
" return response\n",
92+
"\n",
93+
"# === CV Module (YOLOv8 for Object Detection) ===\n",
9594
"class CVModule:\n",
9695
" def __init__(self):\n",
97-
" try:\n",
98-
" self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
99-
" self.model = YOLO('yolov5su.pt').to(self.device)\n",
100-
" logger.info(\"CV model loaded successfully.\")\n",
101-
" except Exception as e:\n",
102-
" logger.error(f\"Failed to load CV model: {e}\")\n",
103-
" raise RuntimeError(\"Failed to load CV model.\")\n",
96+
" self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
97+
" self.model = YOLO('yolov8n.pt').to(self.device)\n",
98+
" logger.info(\"CV model loaded successfully.\")\n",
10499
"\n",
105100
" def detect_objects(self, image: Image.Image) -> str:\n",
106101
" logger.debug(\"Detecting objects in the image.\")\n",
107-
" try:\n",
108-
" results = self.model(image)\n",
109-
" return results.pandas().xyxy[0].to_json()\n",
110-
" except Exception as e:\n",
111-
" logger.error(f\"Object detection failed: {e}\")\n",
112-
" raise ValueError(\"Object detection error.\")\n",
113-
"\n",
114-
"# === Speech Processor ===\n",
102+
" results = self.model(image)\n",
103+
" return results.pandas().xyxy[0].to_json()\n",
104+
"\n",
105+
"# === Speech Processor (Whisper for Speech-to-Text, PyTTSX3 for Text-to-Speech) ===\n",
115106
"class SpeechProcessor:\n",
116107
" def __init__(self):\n",
117-
" try:\n",
118-
" self.whisper_model = whisper.load_model(\"base\")\n",
119-
" self.tts = pyttsx3.init()\n",
120-
" logger.info(\"Speech processor initialized successfully.\")\n",
121-
" except Exception as e:\n",
122-
" logger.error(f\"Failed to initialize speech processor: {e}\")\n",
123-
" raise RuntimeError(\"Failed to initialize speech processor.\")\n",
108+
" self.whisper_model = whisper.load_model(\"base\")\n",
109+
" self.tts = pyttsx3.init()\n",
110+
" logger.info(\"Speech processor initialized successfully.\")\n",
124111
"\n",
125112
" def speech_to_text(self, audio_file: UploadFile) -> str:\n",
126-
" logger.debug(\"Processing speech-to-text.\")\n",
127-
" try:\n",
128-
" with audio_file.file as audio_data:\n",
129-
" result = self.whisper_model.transcribe(audio_data)\n",
113+
" with audio_file.file as audio_data:\n",
114+
" result = self.whisper_model.transcribe(audio_data)\n",
130115
" return result['text']\n",
131-
" except Exception as e:\n",
132-
" logger.error(f\"Speech-to-text failed: {e}\")\n",
133-
" raise ValueError(\"Speech-to-text error.\")\n",
134116
"\n",
135117
" def text_to_speech(self, text: str) -> None:\n",
136118
" if not text.strip():\n",
137119
" raise ValueError(\"Text cannot be empty.\")\n",
138-
" logger.debug(\"Processing text-to-speech.\")\n",
139-
" try:\n",
140-
" self.tts.say(text)\n",
141-
" self.tts.runAndWait()\n",
142-
" except Exception as e:\n",
143-
" logger.error(f\"Text-to-speech failed: {e}\")\n",
144-
" raise RuntimeError(\"Text-to-speech error.\")\n",
120+
" self.tts.say(text)\n",
121+
" self.tts.runAndWait()\n",
145122
"\n",
146123
" def __del__(self):\n",
147124
" self.tts.stop()\n",
@@ -154,7 +131,7 @@
154131
" self.speech_processor = SpeechProcessor()\n",
155132
"\n",
156133
" async def process_nlp(self, text: str) -> str:\n",
157-
" return self.nlp.generate_text(text)\n",
134+
" return await asyncio.to_thread(self.nlp.generate_text, text)\n",
158135
"\n",
159136
" async def process_cv(self, image: Image.Image) -> str:\n",
160137
" return await asyncio.to_thread(self.cv.detect_objects, image)\n",
@@ -167,58 +144,49 @@
167144
"\n",
168145
"# === FastAPI Application ===\n",
169146
"app = FastAPI()\n",
147+
"\n",
170148
"pipeline = EnhancedAGIPipeline()\n",
171149
"\n",
150+
"# === Endpoints ===\n",
172151
"@app.post(\"/process-nlp/\", response_model=TextResponse, dependencies=[Depends(authenticate_user)])\n",
173152
"async def process_nlp(request: TextRequest):\n",
174-
" try:\n",
175-
" response = await pipeline.process_nlp(request.text)\n",
176-
" logger.info(\"NLP processed successfully.\")\n",
177-
" return {\"response\": response}\n",
178-
" except Exception as e:\n",
179-
" logger.error(f\"NLP processing failed: {e}\")\n",
180-
" raise HTTPException(status_code=500, detail=\"NLP processing error.\")\n",
153+
" response = await pipeline.process_nlp(request.text)\n",
154+
" return {\"response\": response}\n",
181155
"\n",
182156
"@app.post(\"/process-cv-detection/\", dependencies=[Depends(authenticate_user)])\n",
183157
"async def process_cv_detection(file: UploadFile):\n",
184-
" try:\n",
158+
" image = Image.open(io.BytesIO(await file.read()))\n",
159+
" response = await pipeline.process_cv(image)\n",
160+
" return {\"detections\": response}\n",
161+
"\n",
162+
"@app.post(\"/batch-cv-detection/\", dependencies=[Depends(authenticate_user)])\n",
163+
"async def batch_cv_detection(files: List[UploadFile]):\n",
164+
" responses = []\n",
165+
" for file in files:\n",
185166
" image = Image.open(io.BytesIO(await file.read()))\n",
186167
" response = await pipeline.process_cv(image)\n",
187-
" logger.info(\"Object detection processed successfully.\")\n",
188-
" return {\"detections\": response}\n",
189-
" except Exception as e:\n",
190-
" logger.error(f\"Object detection failed: {e}\")\n",
191-
" raise HTTPException(status_code=500, detail=\"Object detection error.\")\n",
168+
" responses.append(response)\n",
169+
" return {\"batch_detections\": responses}\n",
192170
"\n",
193171
"@app.post(\"/speech-to-text/\", response_model=TextResponse, dependencies=[Depends(authenticate_user)])\n",
194172
"async def speech_to_text(file: UploadFile):\n",
195-
" try:\n",
196-
" response = await pipeline.process_speech_to_text(file)\n",
197-
" logger.info(\"Speech-to-text processed successfully.\")\n",
198-
" return {\"response\": response}\n",
199-
" except Exception as e:\n",
200-
" logger.error(f\"Speech-to-text failed: {e}\")\n",
201-
" raise HTTPException(status_code=500, detail=\"Speech-to-text error.\")\n",
173+
" response = await pipeline.process_speech_to_text(file)\n",
174+
" return {\"response\": response}\n",
202175
"\n",
203176
"@app.post(\"/text-to-speech/\", dependencies=[Depends(authenticate_user)])\n",
204177
"async def text_to_speech(request: TextRequest):\n",
205-
" try:\n",
206-
" await pipeline.process_text_to_speech(request.text)\n",
207-
" logger.info(\"Text-to-speech processed successfully.\")\n",
208-
" return {\"response\": \"Speech synthesis complete.\"}\n",
209-
" except Exception as e:\n",
210-
" logger.error(f\"Text-to-speech failed: {e}\")\n",
211-
" raise HTTPException(status_code=500, detail=\"Text-to-speech error.\")\n",
212-
"\n",
213-
"# === Run the Application with HTTPS ===\n",
178+
" await pipeline.process_text_to_speech(request.text)\n",
179+
" return {\"response\": \"Speech synthesis complete.\"}\n",
180+
"\n",
181+
"# === Run the Application with HTTPS (uvicorn) ===\n",
214182
"if __name__ == \"__main__\":\n",
215183
" nest_asyncio.apply()\n",
216184
" config = uvicorn.Config(app, host=\"0.0.0.0\", port=8000)\n",
217185
" server = uvicorn.Server(config)\n",
218186
" asyncio.run(server.serve())"
219187
],
220188
"metadata": {
221-
"id": "3yRf_BMYqzHJ"
189+
"id": "UgUAMujBWqGS"
222190
},
223191
"execution_count": null,
224192
"outputs": []

0 commit comments

Comments
 (0)